Revert "fix clippy errors with cargo clippy --fix"
This reverts commit dfd10fa.

Signed-off-by: lucasliang <[email protected]>
LykxSassinator committed Jan 10, 2024
1 parent 6fe7574 commit 483c87b
Showing 15 changed files with 55 additions and 44 deletions.
8 changes: 4 additions & 4 deletions .github/workflows/rust.yml
@@ -19,15 +19,15 @@ jobs:
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly-2022-11-15
toolchain: nightly-2022-11-03
override: true
components: rustfmt, clippy, rust-src
- uses: Swatinem/rust-cache@v1
with:
sharedKey: ${{ matrix.os }}
- name: Cache dependencies
if: ${{ matrix.os == 'ubuntu-latest' }}
run: if [[ ! -e ~/.cargo/bin/grcov ]]; then cargo install --locked grcov; fi
run: if [[ ! -e ~/.cargo/bin/grcov ]]; then cargo install grcov; fi
- name: Format
run: |
make format
@@ -60,7 +60,7 @@ jobs:
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: 1.66.0
toolchain: 1.61.0
override: true
components: rustfmt, clippy, rust-src
- uses: Swatinem/rust-cache@v1
@@ -87,7 +87,7 @@ jobs:
uses: actions-rs/toolchain@v1
with:
profile: minimal
toolchain: nightly-2022-11-15
toolchain: nightly-2022-11-03
override: true
components: llvm-tools-preview
- uses: Swatinem/rust-cache@v1
4 changes: 2 additions & 2 deletions Cargo.toml
@@ -3,7 +3,7 @@ name = "raft-engine"
version = "0.3.0"
authors = ["The TiKV Project Developers"]
edition = "2018"
rust-version = "1.66.0"
rust-version = "1.61.0"
description = "A persistent storage engine for Multi-Raft logs"
readme = "README.md"
repository = "https://github.com/tikv/raft-engine"
@@ -69,7 +69,7 @@ raft = { git = "https://github.com/tikv/raft-rs", branch = "master", default-fea
rand = "0.8"
rand_distr = "0.4"
tempfile = "3.1"
toml = "0.8"
toml = "0.7"

[features]
internals = []
2 changes: 1 addition & 1 deletion examples/fork.rs
@@ -22,6 +22,6 @@ fn main() {
..Default::default()
};
let fs = Arc::new(DefaultFileSystem);
Engine::<_, _>::fork(&cfg, fs, target).unwrap();
Engine::<_, _>::fork(&cfg, fs, &target).unwrap();
println!("success!");
}
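
The only change here puts back the `&` that `cargo clippy --fix` had dropped from the last argument. Such a borrow is redundant (but harmless) when the parameter is generic over `AsRef<Path>`, which is typically why both spellings compile. A minimal self-contained sketch, using a hypothetical `open_dir` helper rather than the real `Engine::fork` signature:

use std::path::{Path, PathBuf};

// Hypothetical helper: any `P: AsRef<Path>` accepts both `target` and `&target`.
fn open_dir<P: AsRef<Path>>(path: P) -> std::io::Result<std::fs::ReadDir> {
    std::fs::read_dir(path.as_ref())
}

fn main() -> std::io::Result<()> {
    let target = PathBuf::from(".");
    open_dir(&target)?; // borrowed form, restored by this revert
    open_dir(target)?;  // owned form, produced by `cargo clippy --fix`
    Ok(())
}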
2 changes: 1 addition & 1 deletion src/file_pipe_log/format.rs
@@ -329,7 +329,7 @@ mod tests {
file_context.id.seq = 10;
file_context.version = Version::V2;
assert_eq!(file_context.get_signature().unwrap(), 10);
let abnormal_seq = (file_context.id.seq << 32) + 100_u64;
let abnormal_seq = (file_context.id.seq << 32) as u64 + 100_u64;
file_context.id.seq = abnormal_seq;
assert_ne!(file_context.get_signature().unwrap() as u64, abnormal_seq);
assert_eq!(file_context.get_signature().unwrap(), 100);
12 changes: 8 additions & 4 deletions src/file_pipe_log/log_file.rs
@@ -95,9 +95,13 @@ impl<F: FileSystem> LogFileWriter<F> {
let new_written = self.written + buf.len();
if self.capacity < new_written {
let _t = StopWatch::new(&*LOG_ALLOCATE_DURATION_HISTOGRAM);
let alloc = target_size_hint
.saturating_sub(self.capacity)
.clamp(new_written - self.capacity, FILE_ALLOCATE_SIZE);
let alloc = std::cmp::max(
new_written - self.capacity,
std::cmp::min(
FILE_ALLOCATE_SIZE,
target_size_hint.saturating_sub(self.capacity),
),
);
if let Err(e) = self.writer.allocate(self.capacity, alloc) {
warn!("log file allocation failed: {}", e);
}
@@ -168,7 +172,7 @@ impl<F: FileSystem> LogFileReader<F> {
}

pub fn read(&mut self, handle: FileBlockHandle) -> Result<Vec<u8>> {
let mut buf = vec![0; handle.len];
let mut buf = vec![0; handle.len as usize];
let size = self.read_to(handle.offset, &mut buf)?;
buf.truncate(size);
Ok(buf)
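
The first hunk above replaces the `clamp` call introduced by `cargo clippy --fix` with the original max/min nesting. A standalone sketch of both forms (the constant value is a placeholder, not the crate's real FILE_ALLOCATE_SIZE); note that `clamp` panics when the lower bound exceeds the upper bound, i.e. when a single write is larger than FILE_ALLOCATE_SIZE, whereas the max/min form simply returns the lower bound, so the two only agree below that threshold:

const FILE_ALLOCATE_SIZE: usize = 16 * 1024 * 1024; // placeholder value

fn alloc_clamp(new_written: usize, capacity: usize, target_size_hint: usize) -> usize {
    target_size_hint
        .saturating_sub(capacity)
        .clamp(new_written - capacity, FILE_ALLOCATE_SIZE)
}

fn alloc_max_min(new_written: usize, capacity: usize, target_size_hint: usize) -> usize {
    std::cmp::max(
        new_written - capacity,
        std::cmp::min(
            FILE_ALLOCATE_SIZE,
            target_size_hint.saturating_sub(capacity),
        ),
    )
}

fn main() {
    // Agreement holds as long as the pending write fits in one allocation step.
    for hint in [0, 1 << 20, 1 << 30] {
        assert_eq!(alloc_clamp(4096, 0, hint), alloc_max_min(4096, 0, hint));
    }
}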
4 changes: 2 additions & 2 deletions src/file_pipe_log/mod.rs
@@ -266,13 +266,13 @@ pub mod debug {
let file_system = Arc::new(DefaultFileSystem);
// An unrelated sub-directory.
let unrelated_dir = dir.path().join(Path::new("random_dir"));
std::fs::create_dir(unrelated_dir).unwrap();
std::fs::create_dir(&unrelated_dir).unwrap();
// An unrelated file.
let unrelated_file_path = dir.path().join(Path::new("random_file"));
let _unrelated_file = std::fs::File::create(&unrelated_file_path).unwrap();
// A corrupted log file.
let corrupted_file_path = FileId::dummy(LogQueue::Append).build_file_path(dir.path());
let _corrupted_file = std::fs::File::create(corrupted_file_path).unwrap();
let _corrupted_file = std::fs::File::create(&corrupted_file_path).unwrap();
// An empty log file.
let empty_file_path = FileId::dummy(LogQueue::Rewrite).build_file_path(dir.path());
let mut writer = build_file_writer(
17 changes: 10 additions & 7 deletions src/file_pipe_log/pipe.rs
@@ -101,7 +101,7 @@ impl<F: FileSystem> SinglePipe<F> {
let path = file_id.build_file_path(&paths[path_id]);
active_files.push(File {
seq: file_id.seq,
handle: file_system.create(path)?.into(),
handle: file_system.create(&path)?.into(),
format: default_format,
path_id,
});
@@ -152,7 +152,7 @@ impl<F: FileSystem> SinglePipe<F> {
/// filesystem.
fn sync_dir(&self, path_id: PathId) -> Result<()> {
debug_assert!(!self.paths.is_empty());
std::fs::File::open(PathBuf::from(&self.paths[path_id])).and_then(|d| d.sync_all())?;
std::fs::File::open(&PathBuf::from(&self.paths[path_id])).and_then(|d| d.sync_all())?;
Ok(())
}

@@ -194,7 +194,7 @@ impl<F: FileSystem> SinglePipe<F> {
};
let path_id = find_available_dir(&self.paths, self.target_file_size);
let path = new_file_id.build_file_path(&self.paths[path_id]);
Ok((path_id, self.file_system.create(path)?))
Ok((path_id, self.file_system.create(&path)?))
}

/// Returns a shared [`LogFd`] for the specified file sequence number.
@@ -542,7 +542,7 @@ pub(crate) fn find_available_dir(paths: &Paths, target_size: usize) -> PathId {
// space usage.
if paths.len() > 1 {
for (t, p) in paths.iter().enumerate() {
if let Ok(disk_stats) = fs2::statvfs(p) {
if let Ok(disk_stats) = fs2::statvfs(&p) {
if target_size <= disk_stats.available_space() as usize {
return t;
}
@@ -654,20 +654,23 @@ mod tests {

let file_handle = pipe_log.append(queue, &mut &s_content).unwrap();
assert_eq!(file_handle.id.seq, 3);
assert_eq!(file_handle.offset, header_size + s_content.len() as u64);
assert_eq!(
file_handle.offset,
header_size as u64 + s_content.len() as u64
);

let content_readed = pipe_log
.read_bytes(FileBlockHandle {
id: FileId { queue, seq: 3 },
offset: header_size,
offset: header_size as u64,
len: s_content.len(),
})
.unwrap();
assert_eq!(content_readed, s_content);
// try to fetch abnormal entry
let abnormal_content_readed = pipe_log.read_bytes(FileBlockHandle {
id: FileId { queue, seq: 12 }, // abnormal seq
offset: header_size,
offset: header_size as u64,
len: s_content.len(),
});
assert!(abnormal_content_readed.is_err());
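
Several hunks above (and later in log_batch.rs, util.rs and the benches) restore casts such as `header_size as u64` or `handle.len as usize` that `cargo clippy --fix` had stripped. When the operand already has the target type, the cast is a no-op, which is what clippy's `unnecessary_cast` lint flags. A tiny self-contained sketch with assumed types, for illustration only:

fn main() {
    let header_size: u64 = 24;   // assumed to already be u64: the cast below is a no-op
    let payload_len: usize = 16; // usize: this cast genuinely converts
    let offset = header_size as u64 + payload_len as u64;
    assert_eq!(offset, 40);
}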
6 changes: 3 additions & 3 deletions src/file_pipe_log/pipe_builder.rs
@@ -162,7 +162,7 @@ impl<F: FileSystem> DualPipesBuilder<F> {
} else {
file_id.build_file_path(dir)
};
if self.file_system.exists_metadata(path) {
if self.file_system.exists_metadata(&path) {
delete_start = Some(i.saturating_sub(1) * files[0].seq / max_sample + 1);
break;
}
@@ -494,7 +494,7 @@ impl<F: FileSystem> DualPipesBuilder<F> {
let path_id = find_available_dir(&self.dirs, target_file_size);
let root_path = &self.dirs[path_id];
let path = root_path.join(build_recycled_file_name(seq));
let handle = Arc::new(self.file_system.create(path)?);
let handle = Arc::new(self.file_system.create(&path)?);
let mut writer = self.file_system.new_writer(handle.clone())?;
let mut written = 0;
let buf = vec![0; std::cmp::min(PREFILL_BUFFER_SIZE, target_file_size)];
@@ -533,7 +533,7 @@ impl<F: FileSystem> DualPipesBuilder<F> {
let f = self.recycled_files.pop().unwrap();
let root_path = &self.dirs[f.path_id];
let path = root_path.join(build_recycled_file_name(f.seq));
let _ = self.file_system.delete(path);
let _ = self.file_system.delete(&path);
}
Ok(())
}
4 changes: 2 additions & 2 deletions src/filter.rs
@@ -339,7 +339,7 @@ impl RhaiFilterMachine {
}
// Delete backup file and defuse the guard.
for (bak, guard) in guards.into_iter() {
let _ = std::fs::remove_file(bak);
let _ = std::fs::remove_file(&bak);
let _ = ScopeGuard::into_inner(guard);
}
Ok(())
@@ -416,7 +416,7 @@ pub struct RhaiFilterMachineFactory {
impl RhaiFilterMachineFactory {
pub fn from_script(script: String) -> Self {
let engine = Engine::new();
let ast = engine.compile(script).unwrap();
let ast = engine.compile(&script).unwrap();
engine.run_ast_with_scope(&mut Scope::new(), &ast).unwrap();
Self {
engine: Arc::new(engine),
10 changes: 5 additions & 5 deletions src/log_batch.rs
@@ -87,7 +87,7 @@ impl EntryIndexes {
let entry_len = (t as u32) - *entries_size;
let entry_index = EntryIndex {
index,
entry_offset: *entries_size,
entry_offset: *entries_size as u32,
entry_len,
..Default::default()
};
@@ -850,7 +850,7 @@ impl LogBatch {
handle.offset += LOG_BATCH_HEADER_LEN as u64;
match self.buf_state {
BufState::Sealed(_, entries_len) => {
debug_assert!(LOG_BATCH_HEADER_LEN + entries_len < handle.len);
debug_assert!(LOG_BATCH_HEADER_LEN + entries_len < handle.len as usize);
handle.len = entries_len;
}
_ => unreachable!(),
@@ -1311,7 +1311,7 @@ mod tests {
offset: 0,
};
let old_approximate_size = batch.approximate_size();
let len = batch.finish_populate(usize::from(compress)).unwrap();
let len = batch.finish_populate(if compress { 1 } else { 0 }).unwrap();
assert!(old_approximate_size >= len);
assert_eq!(batch.approximate_size(), len);
let mut batch_handle = mocked_file_block_handle;
@@ -1391,7 +1391,7 @@ mod tests {
assert_eq!(decoded_item_batch, item_batch);
assert!(decoded_item_batch.approximate_size() >= len - offset);

let entries = &encoded[LOG_BATCH_HEADER_LEN..offset];
let entries = &encoded[LOG_BATCH_HEADER_LEN..offset as usize];
for item in decoded_item_batch.items.iter() {
if let LogItemContent::EntryIndexes(entry_indexes) = &item.content {
if !entry_indexes.0.is_empty() {
@@ -1503,7 +1503,7 @@ mod tests {
.unwrap();

// decode and assert entries
let entry_bytes = &encoded[LOG_BATCH_HEADER_LEN..offset];
let entry_bytes = &encoded[LOG_BATCH_HEADER_LEN..offset as usize];
for item in decoded_item_batch.items.iter() {
match &item.content {
LogItemContent::EntryIndexes(entry_indexes) => {
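
One hunk above also swaps `usize::from(compress)` back to `if compress { 1 } else { 0 }`. The two are interchangeable because `From<bool> for usize` maps false to 0 and true to 1; a short standalone check:

fn main() {
    for compress in [false, true] {
        assert_eq!(usize::from(compress), if compress { 1 } else { 0 });
    }
}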
6 changes: 5 additions & 1 deletion src/pipe_log.rs
@@ -192,7 +192,11 @@ pub trait PipeLog: Sized {

/// Returns the oldest file ID that is newer than `position`% of all files.
fn file_at(&self, queue: LogQueue, mut position: f64) -> FileSeq {
position = position.clamp(0.0, 1.0);
if position > 1.0 {
position = 1.0;
} else if position < 0.0 {
position = 0.0;
}
let (first, active) = self.file_span(queue);
let count = active - first + 1;
first + (count as f64 * position) as u64
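
`file_at` above goes back from `f64::clamp` to the explicit branches. The two forms agree on every input, including NaN, which both pass through unchanged; a standalone check:

fn manual(mut position: f64) -> f64 {
    if position > 1.0 {
        position = 1.0;
    } else if position < 0.0 {
        position = 0.0;
    }
    position
}

fn main() {
    for p in [-0.5, 0.0, 0.3, 1.0, 2.5] {
        assert_eq!(manual(p), p.clamp(0.0, 1.0));
    }
    // NaN is left untouched by both forms.
    assert!(manual(f64::NAN).is_nan() && f64::NAN.clamp(0.0, 1.0).is_nan());
}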
12 changes: 6 additions & 6 deletions src/swappy_allocator.rs
@@ -44,7 +44,7 @@ pub struct SwappyAllocator<A: Allocator>(Arc<SwappyAllocatorCore<A>>);
impl<A: Allocator> SwappyAllocator<A> {
pub fn new_over(path: &Path, budget: usize, alloc: A) -> SwappyAllocator<A> {
if path.exists() {
if let Err(e) = std::fs::remove_dir_all(path) {
if let Err(e) = std::fs::remove_dir_all(&path) {
error!(
"Failed to clean up old swap directory: {}. \
There might be obsolete swap files left in {}.",
@@ -280,7 +280,7 @@ impl Page {
fail::fail_point!("swappy::page::new_failure", |_| None);
if !root.exists() {
// Create directory only when it's needed.
std::fs::create_dir_all(root)
std::fs::create_dir_all(&root)
.map_err(|e| error!("Failed to create swap directory: {}.", e))
.ok()?;
}
@@ -289,7 +289,7 @@
.read(true)
.write(true)
.create(true)
.open(path)
.open(&path)
.map_err(|e| error!("Failed to open swap file: {}", e))
.ok()?;
f.set_len(size as u64)
@@ -344,7 +344,7 @@ impl Page {
fn release(self, root: &Path) {
debug_assert_eq!(self.ref_counter, 0);
let path = root.join(Self::page_file_name(self.seq));
if let Err(e) = std::fs::remove_file(path) {
if let Err(e) = std::fs::remove_file(&path) {
warn!("Failed to delete swap file: {}", e);
}
SWAP_FILE_COUNT.dec();
@@ -825,7 +825,7 @@ mod tests {
// test_eq_after_rotation
// test that two deques are equal even if elements are laid out differently
let len = 28;
let mut ring: VecDeque<i32> = collect(0..len, allocator.clone());
let mut ring: VecDeque<i32> = collect(0..len as i32, allocator.clone());
let mut shifted = ring.clone();
for _ in 0..10 {
// shift values 1 step to the right by pop, sub one, push
@@ -1024,7 +1024,7 @@ mod tests {
// test_extend_ref
let mut v = VecDeque::new_in(allocator.clone());
v.push_back(1);
v.extend([2, 3, 4]);
v.extend(&[2, 3, 4]);

assert_eq!(v.len(), 4);
assert_eq!(v[0], 1);
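
The last hunk above restores `v.extend(&[2, 3, 4])` over `v.extend([2, 3, 4])`. Both compile because `VecDeque` implements `Extend` for owned items as well as for references to `Copy` items; a sketch against the plain std `VecDeque` (the test itself uses the crate's allocator-aware deque):

use std::collections::VecDeque;

fn main() {
    let mut by_value: VecDeque<i32> = VecDeque::from([1]);
    let mut by_ref = by_value.clone();
    by_value.extend([2, 3, 4]); // what `cargo clippy --fix` produced
    by_ref.extend(&[2, 3, 4]);  // what this revert restores
    assert_eq!(by_value, by_ref);
}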
6 changes: 3 additions & 3 deletions src/util.rs
@@ -77,11 +77,11 @@ impl Serialize for ReadableSize {
write!(buffer, "{}PiB", size / PIB).unwrap();
} else if size % TIB == 0 {
write!(buffer, "{}TiB", size / TIB).unwrap();
} else if size % GIB == 0 {
} else if size % GIB as u64 == 0 {
write!(buffer, "{}GiB", size / GIB).unwrap();
} else if size % MIB == 0 {
} else if size % MIB as u64 == 0 {
write!(buffer, "{}MiB", size / MIB).unwrap();
} else if size % KIB == 0 {
} else if size % KIB as u64 == 0 {
write!(buffer, "{}KiB", size / KIB).unwrap();
} else {
return serializer.serialize_u64(size);
4 changes: 2 additions & 2 deletions stress/src/main.rs
@@ -380,8 +380,8 @@ impl Summary {
let stddev = statistical::standard_deviation(&self.thread_qps, None);
stddev / median
} else {
let first = *self.thread_qps.first().unwrap();
let last = *self.thread_qps.last().unwrap();
let first = *self.thread_qps.first().unwrap() as f64;
let last = *self.thread_qps.last().unwrap() as f64;
f64::abs(first - last) / (first + last)
};
println!("Fairness = {:.01}%", 100.0 - fairness * 100.0);
2 changes: 1 addition & 1 deletion tests/benches/bench_recovery.rs
@@ -107,7 +107,7 @@ fn dir_size(path: &str) -> ReadableSize {
ReadableSize(
std::fs::read_dir(PathBuf::from(path))
.unwrap()
.map(|entry| std::fs::metadata(entry.unwrap().path()).unwrap().len())
.map(|entry| std::fs::metadata(entry.unwrap().path()).unwrap().len() as u64)
.sum(),
)
}
