Enable flake8-bugbear B020 lint (pytorch#110823)
Fixes part of pytorch#106571

Pull Request resolved: pytorch#110823
Approved by: https://github.com/Skylion007
dmage authored and pytorchmergebot committed Oct 24, 2023
1 parent b600aed commit 192477b
Showing 23 changed files with 70 additions and 67 deletions.
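
For context on the rule being enabled: flake8-bugbear's B020 flags a loop whose control variable reuses the name of the iterable (or another outer name) it loops over, silently shadowing it. A minimal illustrative sketch of the pattern and of the rename-style fix applied throughout this commit (the names below are made up, not taken from the changed files):

    # B020: the loop target `expected` rebinds the very list being iterated,
    # so the original sequence is no longer reachable by that name.
    expected = [(1, 1), (2, 4), (3, 9)]
    for n, expected in expected:
        assert n * n == expected

    # Fix used in this commit: rename the iterable so nothing is shadowed.
    expected_results = [(1, 1), (2, 4), (3, 9)]
    for n, expected in expected_results:
        assert n * n == expected
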
2 changes: 1 addition & 1 deletion .flake8
@@ -14,7 +14,7 @@ ignore =
# to line this up with executable bit
EXE001,
# these ignores are from flake8-bugbear; please fix!
B007,B008,B017,B019,B020,B023,B026,B028,B903,B904,B905,B906,B907
B007,B008,B017,B019,B023,B026,B028,B903,B904,B905,B906,B907
# these ignores are from flake8-comprehensions; please fix!
C407,
# these ignores are from flake8-logging-format; please fix!
4 changes: 2 additions & 2 deletions caffe2/python/net_builder_test.py
@@ -90,7 +90,7 @@ def test_ops(self):
plan.AddStep(to_execution_step(nb))
ws = workspace.C.Workspace()
ws.run(plan)
expected = [
expected_results = [
(y, 5),
(z, False),
(w, True),
@@ -99,7 +99,7 @@ def test_ops(self):
(p, 2),
(q, 3),
]
for b, expected in expected:
for b, expected in expected_results:
actual = ws.blobs[str(b)].fetch()
self.assertEqual(actual, expected)

4 changes: 2 additions & 2 deletions caffe2/python/schema.py
@@ -1081,7 +1081,7 @@ def from_column_list(
'col_names and col_blobs must have the same length.'
)
root = _SchemaNode('root', 'Struct')
for col_name, col_type, col_blob, col_metadata in zip(
for col_name, col_type, col_blob, col_md in zip(
col_names, col_types, col_blobs, col_metadata
):
columns = col_name.split(FIELD_SEPARATOR)
@@ -1095,7 +1095,7 @@ def from_column_list(
field = Scalar(
dtype=col_type,
blob=col_blob,
metadata=col_metadata
metadata=col_md
)
next = current.add_child(name, type_str)
if field is not None:
2 changes: 1 addition & 1 deletion pyproject.toml
@@ -28,7 +28,7 @@ ignore = [
# these ignores are from flake8-bugbear; please fix!
"B007", "B008", "B017",
"B018", # Useless expression
"B019", "B020",
"B019",
"B023", "B026",
"B028", # No explicit `stacklevel` keyword argument found
"B904",
4 changes: 2 additions & 2 deletions scripts/release_notes/test_release_notes.py
@@ -34,8 +34,8 @@ def test_read_write(self):
expected.write_to_disk()

commit_list = CommitList.from_existing(commit_list_path)
for commit, expected in zip(commit_list.commits, expected.commits):
self.assertEqual(commit, expected)
for commit, expected_commit in zip(commit_list.commits, expected.commits):
self.assertEqual(commit, expected_commit)

def test_update_to(self):
with tempfile.TemporaryDirectory() as tempdir:
@@ -880,8 +880,8 @@ def assert_rank_consistency(

# global world size == sum of all the role world sizes
expected_world_size = sum(expected_role_world_sizes.values())
for role, run_results in run_results.items():
for result in run_results:
for role, results in run_results.items():
for result in results:
res = result.return_values
for role_info in res.values():
rank = role_info.rank
14 changes: 7 additions & 7 deletions test/distributed/fsdp/test_fsdp_dtensor_state_dict.py
@@ -143,11 +143,11 @@ def test_dtensor_sharded_tensor_state_dict_identical(
sharded_tensor_osd = FSDP.optim_state_dict(ref_model, ref_optim)

# Check dtensor and sharded_tensor model state dict values are identical
for dtensor_sd, sharded_tensor_sd in zip(
for dtensor_sd_item, sharded_tensor_sd_item in zip(
dtensor_sd.items(), sharded_tensor_sd.items()
):
k1, v1 = dtensor_sd
k2, v2 = sharded_tensor_sd
k1, v1 = dtensor_sd_item
k2, v2 = sharded_tensor_sd_item
self.assertEqual(k1, k2)

# if the ShardedTensor is an empty shard,
@@ -227,15 +227,15 @@ def test_dtensor_sharded_optim_load_state_dict(
new_optim_state_dict = FSDP.optim_state_dict(model, optim)

# Check whether new_optim_state_dict is the same as ref_optim_state_dict.
for new_optim_state_dict, ref_optim_state_dict in zip(
for new_optim_state_dict_item, ref_optim_state_dict_item in zip(
new_optim_state_dict["state"].items(),
ref_optim_state_dict["state"].items(),
):
# check FQN are the same
self.assertEqual(new_optim_state_dict[0], ref_optim_state_dict[0])
self.assertEqual(new_optim_state_dict_item[0], ref_optim_state_dict_item[0])
for new_optim_hyper_param, ref_optim_hyper_param in zip(
new_optim_state_dict[1].items(),
ref_optim_state_dict[1].items(),
new_optim_state_dict_item[1].items(),
ref_optim_state_dict_item[1].items(),
):
k1, v1 = new_optim_hyper_param
k2, v2 = ref_optim_hyper_param
14 changes: 7 additions & 7 deletions test/distributed/fsdp/test_hsdp_dtensor_state_dict.py
@@ -154,11 +154,11 @@ def test_dtensor_sharded_tensor_state_dict_identical(self, offload_to_cpu):
sharded_tensor_osd = FSDP.optim_state_dict(ref_model, ref_optim)

# Check dtensor and sharded_tensor model state dict values are identical
for dtensor_sd, sharded_tensor_sd in zip(
for dtensor_sd_item, sharded_tensor_sd_item in zip(
dtensor_sd.items(), sharded_tensor_sd.items()
):
k1, v1 = dtensor_sd
k2, v2 = sharded_tensor_sd
k1, v1 = dtensor_sd_item
k2, v2 = sharded_tensor_sd_item
self.assertEqual(k1, k2)

self.assertEqual(type(v1), DTensor)
@@ -225,15 +225,15 @@ def test_dtensor_sharded_optim_load_state_dict(self, offload_to_cpu):
new_optim_state_dict = FSDP.optim_state_dict(model, optim)

# Check whether new_optim_state_dict is the same as ref_optim_state_dict.
for new_optim_state_dict, ref_optim_state_dict in zip(
for new_optim_state_dict_item, ref_optim_state_dict_item in zip(
new_optim_state_dict["state"].items(),
ref_optim_state_dict["state"].items(),
):
# check FQN are the same
self.assertEqual(new_optim_state_dict[0], ref_optim_state_dict[0])
self.assertEqual(new_optim_state_dict_item[0], ref_optim_state_dict_item[0])
for new_optim_hyper_param, ref_optim_hyper_param in zip(
new_optim_state_dict[1].items(),
ref_optim_state_dict[1].items(),
new_optim_state_dict_item[1].items(),
ref_optim_state_dict_item[1].items(),
):
k1, v1 = new_optim_hyper_param
k2, v2 = ref_optim_hyper_param
14 changes: 7 additions & 7 deletions test/functorch/test_vmap.py
@@ -3423,9 +3423,9 @@ def test():
sample_input = error_input.sample_input
args = (sample_input.input,) + tuple(sample_input.args)
kwargs = sample_input.kwargs
for args, in_dims, _ in generate_vmap_inputs(args, {}):
for batched_args, in_dims, _ in generate_vmap_inputs(args, {}):
with self.assertRaises(Exception):
vmap(op, in_dims)(*args, **kwargs)
vmap(op, in_dims)(*batched_args, **kwargs)

# Sample inputs check
sample_inputs_op = {
@@ -3455,16 +3455,16 @@ def test():
continue
kwargs = sample_input.kwargs
is_batch_norm_and_training = is_batch_norm_training(op.name, kwargs)
for args, in_dims, _ in generate_vmap_inputs(
for batched_args, in_dims, _ in generate_vmap_inputs(
args, {}, is_batch_norm_and_training=is_batch_norm_and_training):
for func in aliases:
self.vmap_outplace_test(func, args, kwargs, in_dims, check_shape_only, postprocess_fn)
self.vmap_outplace_test(func, batched_args, kwargs, in_dims, check_shape_only, postprocess_fn)
if op.name in skip_inplace:
continue
if not is_valid_inplace_sample_input(sample_input, op, op.inplace_variant):
continue
for func in inplace_aliases:
self.vmap_inplace_test(func, args, kwargs, in_dims, postprocess_fn)
self.vmap_inplace_test(func, batched_args, kwargs, in_dims, postprocess_fn)

if check_has_batch_rule:
check_vmap_fallback(self, test, op)
@@ -4195,11 +4195,11 @@ def push_vjp(leaf, gout):
gout = torch.randn(2, 2, device=device)
args = (leaf, gout)

for args, in_dims, _, in generate_vmap_inputs(args, {}):
for batched_args, in_dims, _, in generate_vmap_inputs(args, {}):
if in_dims[1] is None:
# triggers some composite compliance problem
continue
self.vmap_outplace_test(push_vjp, args, {}, in_dims)
self.vmap_outplace_test(push_vjp, batched_args, {}, in_dims)

def test_advanced_indexing(self, device):
def test(f, args):
4 changes: 2 additions & 2 deletions test/jit/test_freezing.py
@@ -2691,9 +2691,9 @@ def test_freeze_conv_relu_fusion(self):
with set_default_dtype(torch.float):
conv_bias = [True, False]
conv_ops = [nn.Conv2d, nn.Conv3d]
add_z = [True, False]
use_add_z = [True, False]
use_tracing = [True, False]
for use_bias, conv, add_z, tracing in product(conv_bias, conv_ops, add_z, use_tracing):
for use_bias, conv, add_z, tracing in product(conv_bias, conv_ops, use_add_z, use_tracing):
class Net(nn.Module):
def __init__(self, in_channels, out_channels, **kwargs):
super().__init__()
4 changes: 2 additions & 2 deletions test/nn/test_embedding.py
@@ -944,10 +944,10 @@ def test_per_sample_weights_new_offsets(mode, trainable_scale, include_last_offs
atol=dtype2prec_DONTUSE[dtypes[2]], rtol=0)

trainable_scale = (True, False)
include_last_offset = (True, False)
include_last_offset_list = (True, False)
modes = (('sum', False), ('sum', True), ('max', False), ('mean', False))
for (mode, has_weight), trainable, include_last_offset in itertools.product(
modes, trainable_scale, include_last_offset
modes, trainable_scale, include_last_offset_list
):
test_per_sample_weights_new_offsets(
mode, trainable, include_last_offset, has_weight
4 changes: 2 additions & 2 deletions test/quantization/core/test_workflow_ops.py
@@ -364,8 +364,8 @@ def test_forward_per_tensor_cachemask_cuda(self):
def _test_backward_per_tensor_cachemask_impl(self, device):
float_types = (torch.float32, torch.float16, torch.float64)
torch_types = (torch.qint8, torch.quint8)
tensor_qparam = (True, False)
for float_type, torch_type, tensor_qparam in itertools.product(float_types, torch_types, tensor_qparam):
tensor_qparams = (True, False)
for float_type, torch_type, tensor_qparam in itertools.product(float_types, torch_types, tensor_qparams):
X = torch.randn(4, 8).to(device).to(float_type)
X.requires_grad_()
# pick the scale + zp so that some values get clipped
4 changes: 2 additions & 2 deletions test/test_linalg.py
@@ -2413,7 +2413,7 @@ def run_subtest(actual_rank, matrix_size, batches, device, svd_lowrank, **option
self.assertEqual(v.mT.matmul(V).det().abs(), torch.ones(batches, device=device, dtype=dtype))

all_batches = [(), (1,), (3,), (2, 3)]
for actual_rank, size, all_batches in [
for actual_rank, size, all_batches in [ # noqa: B020
(2, (17, 4), all_batches),
(4, (17, 4), all_batches),
(4, (17, 17), all_batches),
@@ -7415,7 +7415,7 @@ def run_subtest(guess_rank, actual_rank, matrix_size, batches, device, pca, **op
self.assertEqual(s[..., :actual_rank], S[..., :actual_rank])

all_batches = [(), (1,), (3,), (2, 3)]
for actual_rank, size, all_batches in [
for actual_rank, size, all_batches in [ # noqa: B020
(2, (17, 4), all_batches),
(2, (100, 4), all_batches),
(6, (100, 40), all_batches),
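
A hedged aside on why the two loops above get `# noqa: B020` rather than a rename: the tuples in the literal list refer to the outer `all_batches` before the loop target of the same name rebinds it, so the shadowing appears to be intentional. A standalone sketch of the same shape (values are illustrative only):

    all_batches = [(), (1,), (3,), (2, 3)]
    for actual_rank, size, all_batches in [  # noqa: B020
        (2, (17, 4), all_batches),   # refers to the outer list, evaluated
        (4, (17, 17), all_batches),  # before the loop rebinds the name
    ]:
        print(actual_rank, size, all_batches)
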
4 changes: 2 additions & 2 deletions test/test_nn.py
@@ -11760,15 +11760,15 @@ def test_cross_entropy_label_smoothing_weight_ignore_indices(self, device):
reductions = ['none', 'sum', 'mean']
label_smoothings = [0.05, 0.15]

weight = torch.tensor([0.3, 0.6], device=device)
wgt = torch.tensor([0.3, 0.6], device=device)
inp1 = torch.tensor([[0.3, 0.4], [1, 2]], device=device)
inp2 = torch.tensor([[0.3, 0.6], [1, 2]], device=device)

targ_default_ignore_index = torch.tensor([-100, 1], device=device)
targ_negative_ignore_index = torch.tensor([-2, 1], device=device)
targ_positive_ignore_index = torch.tensor([2, 1], device=device)

for reduction, label_smoothing, weight in product(reductions, label_smoothings, (None, weight)):
for reduction, label_smoothing, weight in product(reductions, label_smoothings, (None, wgt)):
def check_equal(loss, inp_targ_1, inp_targ_2):
inp1, targ1 = inp_targ_1
inp2, targ2 = inp_targ_2
4 changes: 2 additions & 2 deletions test/torch_np/numpy_tests/lib/test_function_base.py
@@ -734,9 +734,9 @@ def test_n(self):
x = list(range(3))
assert_raises(ValueError, diff, x, n=-1)
output = [diff(x, n=n) for n in range(1, 5)]
expected = [[1, 1], [0], [], []]
expected_output = [[1, 1], [0], [], []]
# assert_(diff(x, n=0) is x)
for n, (expected, out) in enumerate(zip(expected, output), start=1):
for n, (expected, out) in enumerate(zip(expected_output, output), start=1):
assert_(type(out) is np.ndarray)
assert_array_equal(out, expected)
assert_equal(out.dtype, np.int_)
18 changes: 9 additions & 9 deletions torch/_inductor/lowering.py
@@ -323,19 +323,19 @@ def broadcast_symbolic_shapes(a, b):
are symbolic sympy formulas.
"""
output = []
for a, b in itertools.zip_longest(
for x, y in itertools.zip_longest(
reversed(a), reversed(b), fillvalue=sympy.Integer(1)
):
if b == 1:
output.append(a)
elif a == 1:
output.append(b)
if y == 1:
output.append(x)
elif x == 1:
output.append(y)
else:
V.graph.sizevars.guard_equals(a, b)
if len(sympy.expand(b).free_symbols) < len(sympy.expand(a).free_symbols):
output.append(b) # prefer shorter formula
V.graph.sizevars.guard_equals(x, y)
if len(sympy.expand(y).free_symbols) < len(sympy.expand(x).free_symbols):
output.append(y) # prefer shorter formula
else:
output.append(a)
output.append(x)
return tuple(reversed(output))


3 changes: 2 additions & 1 deletion torch/_inductor/triton_heuristics.py
@@ -540,8 +540,9 @@ def _find_names(obj):
import inspect

frame = inspect.currentframe()
for frame in iter(lambda: frame.f_back, None): # type: ignore[union-attr]
while frame is not None:
frame.f_locals
frame = frame.f_back
obj_names = []
for referrer in gc.get_referrers(obj):
if isinstance(referrer, dict):
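
The change above appears to be driven by the same rule: in `for frame in iter(lambda: frame.f_back, None)` the loop target `frame` also appears inside the expression that produces the iterator, which is the self-referential pattern B020 objects to, so the loop is rewritten as an explicit `while`. A minimal sketch of the rewritten walk, under the assumption that `caller_names` is a made-up illustrative helper and not part of the changed file:

    import inspect

    def caller_names():
        names = []
        frame = inspect.currentframe()
        while frame is not None:
            frame.f_locals  # touch f_locals, as the original loop body does
            names.append(frame.f_code.co_name)
            frame = frame.f_back
        return names

    print(caller_names())  # e.g. ['caller_names', '<module>']
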
10 changes: 6 additions & 4 deletions torch/autograd/functional.py
@@ -550,9 +550,9 @@ def jvp(tangents):
is_outputs_tuple, outputs = output_info
# Step 3: for each of the output tangents, split along dim 0
jacobian_input_output = []
for jac, output_i in zip(outputs_before_split, outputs):
for jac_output_i, output_i in zip(outputs_before_split, outputs):
jacobian_output_i_output = []
for jac, input_j in zip(jac.split(input_numels, dim=0), inputs):
for jac, input_j in zip(jac_output_i.split(input_numels, dim=0), inputs):
# We need to transpose the Jacobian because in forward AD, the
# batch dimension represents that of the inputs
jacobian_input_i_output_j = jac.permute(*range(1, jac.ndim), 0).reshape(
@@ -758,9 +758,11 @@ def vjp(grad_output):
# Step 3: The returned jacobian is one big tensor per input. In this step,
# we split each Tensor by output.
jacobian_input_output = []
for jac, input_i in zip(jacobians_of_flat_output, inputs):
for jac_input_i, input_i in zip(jacobians_of_flat_output, inputs):
jacobian_input_i_output = []
for jac, output_j in zip(jac.split(output_numels, dim=0), outputs):
for jac, output_j in zip(
jac_input_i.split(output_numels, dim=0), outputs
):
jacobian_input_i_output_j = jac.view(output_j.shape + input_i.shape)
jacobian_input_i_output.append(jacobian_input_i_output_j)
jacobian_input_output.append(jacobian_input_i_output)
@@ -1262,8 +1262,8 @@ def generate_constraints_node(self, n: Node, counter):
if isinstance(t, torch.Tensor):
if len(t.shape) > 0:
res = []
for t in t.shape:
res.append(t)
for d in t.shape:
res.append(d)
attr_type = TensorType(res)
output, counter = gen_tvar(counter)
self.symbol_dict[n] = output
4 changes: 2 additions & 2 deletions torch/nn/parallel/distributed.py
@@ -1286,8 +1286,8 @@ def model_parameters(m):
)
yield from ps

for m in m.modules() if recurse else [m]:
yield from model_parameters(m)
for mod in m.modules() if recurse else [m]:
yield from model_parameters(mod)

def _check_default_group(self):
pickle_not_supported = False
2 changes: 1 addition & 1 deletion torch/testing/_internal/common_device_type.py
@@ -458,7 +458,7 @@ def dtype_parametrize_fn(test, generic_cls, device_cls, dtypes=dtypes):
parametrize_fn = compose_parametrize_fns(dtype_parametrize_fn, parametrize_fn)

# Instantiate the parametrized tests.
for (test, test_suffix, param_kwargs, decorator_fn) in parametrize_fn(test, generic_cls, cls):
for (test, test_suffix, param_kwargs, decorator_fn) in parametrize_fn(test, generic_cls, cls): # noqa: B020
test_suffix = '' if test_suffix == '' else '_' + test_suffix
device_suffix = '_' + cls.device_type

6 changes: 3 additions & 3 deletions torch/testing/_internal/common_methods_invocations.py
@@ -4926,7 +4926,7 @@ def sample_inputs_put(op_info, device, dtype, requires_grad, **kwargs):
tgt_gen = (make_arg(size) for size in tgt_sizes)
idx = make_idx((0,), high=1)
src = make_arg((0,))
for tgt, acc in product(tgt, (True, False)):
for tgt, acc in product(tgt_gen, (True, False)):
yield SampleInput(input=tgt.clone().requires_grad_(requires_grad),
args=(idx.clone(),
src.clone().requires_grad_(requires_grad),
@@ -8190,9 +8190,9 @@ def sample_inputs_scaled_dot_product_attention(op_info, device, dtype, requires_

qkv_shapes = [(dim_3_q_shape, dim_3_kv_shape), (dim_4_q_shape, dim_4_kv_shape), broadcast_tuple]
samples = []
for qkv_shapes, is_causal, dropout_p in product(
for qkv_shape, is_causal, dropout_p in product(
qkv_shapes, [True, False], [0.0, 0.5]):
shape_q, shape_kv = qkv_shapes
shape_q, shape_kv = qkv_shape
samples.append(SampleInput(
make(shape_q),
make(shape_kv),