diff --git a/.lintrunner.toml b/.lintrunner.toml
index 13e6393dbecf71..4d47a8b3b4f8b3 100644
--- a/.lintrunner.toml
+++ b/.lintrunner.toml
@@ -2666,6 +2666,6 @@ init_command = [
     'python3',
     'tools/linter/adapters/pip_init.py',
     '--dry-run={{DRYRUN}}',
-    'ruff==0.0.285',
+    'ruff==0.0.280',
 ]
 is_formatter = true
diff --git a/benchmarks/functional_autograd_benchmark/torchvision_models.py b/benchmarks/functional_autograd_benchmark/torchvision_models.py
index 595259bd247d8b..3c1f2b471381cd 100644
--- a/benchmarks/functional_autograd_benchmark/torchvision_models.py
+++ b/benchmarks/functional_autograd_benchmark/torchvision_models.py
@@ -168,7 +168,7 @@ def __init__(
         if len(replace_stride_with_dilation) != 3:
             raise ValueError(
                 "replace_stride_with_dilation should be None "
-                f"or a 3-element tuple, got {replace_stride_with_dilation}"
+                "or a 3-element tuple, got {}".format(replace_stride_with_dilation)
             )
         self.groups = groups
         self.base_width = width_per_group
diff --git a/benchmarks/operator_benchmark/benchmark_core.py b/benchmarks/operator_benchmark/benchmark_core.py
index 439cdba8b36211..73a43d65ff9884 100644
--- a/benchmarks/operator_benchmark/benchmark_core.py
+++ b/benchmarks/operator_benchmark/benchmark_core.py
@@ -200,10 +200,10 @@ def __init__(self, args):
     def _print_header(self):
         DASH_LINE = "-" * 40
         print(
-            f"# {DASH_LINE}\n"
+            "# {}\n"
             "# PyTorch/Caffe2 Operator Micro-benchmarks\n"
-            f"# {DASH_LINE}\n"
-            f"# Tag : {self.args.tag_filter}\n"
+            "# {}\n"
+            "# Tag : {}\n".format(DASH_LINE, DASH_LINE, self.args.tag_filter)
         )
         if self.args.list_tests:
             print("# List of tests:")
diff --git a/benchmarks/overrides_benchmark/bench.py b/benchmarks/overrides_benchmark/bench.py
index 93c7f1b130d4a7..0811d0e0d76221 100644
--- a/benchmarks/overrides_benchmark/bench.py
+++ b/benchmarks/overrides_benchmark/bench.py
@@ -57,8 +57,10 @@ def main():
         bench_min, bench_std = bench(tensor_1, tensor_2)

         print(
-            f"Type {t.__name__} had a minimum time of {10**6 * bench_min} us"
-            f" and a standard deviation of {(10**6) * bench_std} us."
+            "Type {} had a minimum time of {} us"
+            " and a standard deviation of {} us.".format(
+                t.__name__, (10**6 * bench_min), (10**6) * bench_std
+            )
         )
diff --git a/pyproject.toml b/pyproject.toml
index eb764cb895fcb9..220986d1160ea7 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -74,7 +74,6 @@ select = [
     "PIE807",
    "PIE810",
    "PLE",
-    "RUF017",
    "TRY302",
 ]
diff --git a/test/cpp_api_parity/functional_impl_check.py b/test/cpp_api_parity/functional_impl_check.py
index ad56e917ef6d5f..828f57e7e69812 100644
--- a/test/cpp_api_parity/functional_impl_check.py
+++ b/test/cpp_api_parity/functional_impl_check.py
@@ -173,7 +173,7 @@ def write_test_to_test_class(
     assert not ('cpp_options_args' in test_params_dict and 'cpp_function_call' in test_params_dict), (
         "Only one of `cpp_options_args` and `cpp_function_call` entries "
-        f"should be present in test params dict:\n{pprint.pformat(test_params_dict)}")
+        "should be present in test params dict:\n{}").format(pprint.pformat(test_params_dict))

     functional_name = compute_functional_name(test_params_dict)
diff --git a/test/cpp_api_parity/module_impl_check.py b/test/cpp_api_parity/module_impl_check.py
index 1aa6273f0d99de..aa18798940ae28 100644
--- a/test/cpp_api_parity/module_impl_check.py
+++ b/test/cpp_api_parity/module_impl_check.py
@@ -209,11 +209,11 @@ def process_test_params_for_module(test_params_dict, device, test_instance_class
     if 'constructor_args' in test_params_dict:
         assert 'cpp_constructor_args' in test_params_dict, (
             "If `constructor_args` is present in test params dict, to enable C++ API parity test, "
-            f"`cpp_constructor_args` must be present in:\n{pprint.pformat(test_params_dict)}"
+            "`cpp_constructor_args` must be present in:\n{}"
             "If you are interested in adding the C++ API parity test, please see:\n"
             "NOTE [How to check NN module / functional API parity between Python and C++ frontends]. \n"
             "If not, please add `test_cpp_api_parity=False` to the test params dict and file an issue about this."
-        )
+        ).format(pprint.pformat(test_params_dict))

     return TorchNNModuleTestParams(
         module_name=module_name,
@@ -233,16 +233,16 @@ def write_test_to_test_class(
     module_name = compute_module_name(test_params_dict)

     assert hasattr(torch.nn, module_name), (
-        f"`torch.nn` doesn't have module `{module_name}`. "
+        "`torch.nn` doesn't have module `{}`. "
         "If you are adding a new test, please set `fullname` using format `ModuleName_desc` "
-        f"or set `module_name` using format `ModuleName` in the module test dict:\n{pprint.pformat(test_params_dict)}"
-    )
+        "or set `module_name` using format `ModuleName` in the module test dict:\n{}"
+    ).format(module_name, pprint.pformat(test_params_dict))

     module_full_name = 'torch::nn::' + module_name

     assert module_full_name in parity_table['torch::nn'], (
-        f"Please add `{module_full_name}` entry to `torch::nn` section of `test/cpp_api_parity/parity-tracker.md`. "
-        f"(Discovered while processing\n{pprint.pformat(test_params_dict)}.)")
+        "Please add `{}` entry to `torch::nn` section of `test/cpp_api_parity/parity-tracker.md`. "
+        "(Discovered while processing\n{}.)").format(module_full_name, pprint.pformat(test_params_dict))

     for device in devices:
         test_params = process_test_params_for_module(
diff --git a/test/nn/test_multihead_attention.py b/test/nn/test_multihead_attention.py
index d5ae098b1d25ae..7c1ba2084c29b5 100644
--- a/test/nn/test_multihead_attention.py
+++ b/test/nn/test_multihead_attention.py
@@ -329,7 +329,7 @@ def test_multihead_attn_3d_attn_mask(self):
         key = torch.rand(batch_size, src_len, embed_dim)  # [N, S, D]
         value = key  # [N, S, D]
         attn_mask = torch.randint(0, 2, (batch_size, tgt_len, src_len)).float()  # [N, T, S]
-        attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, 0.0)
+        attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))

         mta_model = torch.nn.MultiheadAttention(embed_dim, num_heads)
diff --git a/test/onnx/model_defs/squeezenet.py b/test/onnx/model_defs/squeezenet.py
index a97b399c7dac55..b6d83cd5305904 100644
--- a/test/onnx/model_defs/squeezenet.py
+++ b/test/onnx/model_defs/squeezenet.py
@@ -32,7 +32,8 @@ def __init__(self, version=1.0, num_classes=1000, ceil_mode=False):
         super().__init__()
         if version not in [1.0, 1.1]:
             raise ValueError(
-                f"Unsupported SqueezeNet version {version}:" "1.0 or 1.1 expected"
+                "Unsupported SqueezeNet version {version}:"
+                "1.0 or 1.1 expected".format(version=version)
             )
         self.num_classes = num_classes
         if version == 1.0:
diff --git a/test/quantization/core/test_docs.py b/test/quantization/core/test_docs.py
index a5a49f2ff2fa61..82ef4d277805bb 100644
--- a/test/quantization/core/test_docs.py
+++ b/test/quantization/core/test_docs.py
@@ -82,8 +82,10 @@ def get_correct_path(path_from_pytorch):
             # want to make sure we are actually getting some code,
             assert last_line_num - line_num_start > 3 or short_snippet, (
-                f"The code in {path_to_file} identified by {unique_identifier} seems suspiciously short:"
-                f"\n\n###code-start####\n{code}###code-end####"
+                "The code in {} identified by {} seems suspiciously short:"
+                "\n\n###code-start####\n{}###code-end####".format(
+                    path_to_file, unique_identifier, code
+                )
             )

             return code
diff --git a/test/quantization/core/test_quantized_op.py b/test/quantization/core/test_quantized_op.py
index 1d84f29cd66dca..da7ef834727104 100644
--- a/test/quantization/core/test_quantized_op.py
+++ b/test/quantization/core/test_quantized_op.py
@@ -806,11 +806,11 @@ def _test_binary_op_scalar_relu(self, A, b, binary_op_name, binary_op, quantized
                 C_relu, C_relu_hat.q_scale(), C_relu_hat.q_zero_point(), dtype)

             self.assertEqual(C_ref.dequantize(), C_hat.dequantize(),
-                             msg=f"{binary_op_name}_scalar results don't match: "
-                             f"{C_ref.dequantize()} vs {C_hat.dequantize()}")
+                             msg="{}_scalar results don't match: "
+                             "{} vs {}".format(binary_op_name, C_ref.dequantize(), C_hat.dequantize()))
             self.assertEqual(C_relu_ref.dequantize(), C_relu_hat.dequantize(),
-                             msg=f"{binary_op_name}_scalar_relu results don't match: "
-                             f"{C_relu_ref.dequantize()} vs {C_relu_hat.dequantize()}")
+                             msg="{}_scalar_relu results don't match: "
+                             "{} vs {}".format(binary_op_name, C_relu_ref.dequantize(), C_relu_hat.dequantize()))

     @unittest.skipIf(IS_MACOS, "skipping macos test")
     @given(A=hu.tensor(shapes=hu.array_shapes(1, 4, 1, 5),
diff --git a/test/test_autocast.py b/test/test_autocast.py
index 79499d8cb50d5e..7e449df11fefc5 100644
--- a/test/test_autocast.py
+++ b/test/test_autocast.py
@@ -67,7 +67,7 @@ def compare(first, second):
             if (output is not None) and (output_method is not None):
                 self.assertTrue(type(output) == type(output_method))
                 comparison = compare(output, output_method)
-                self.assertTrue(comparison, f"torch.{op} result did not match Tensor.{op} result")
+                self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))

         # Compare numerics to Python-side "autocasting" that (we expect) does the same thing
         # as the C++-side autocasting, and should be bitwise accurate.
diff --git a/test/test_binary_ufuncs.py b/test/test_binary_ufuncs.py
index c4bb5a1ba0b54a..ada4005d128c4d 100644
--- a/test/test_binary_ufuncs.py
+++ b/test/test_binary_ufuncs.py
@@ -168,9 +168,9 @@ def _numel(x):
             if _numel(l) <= 100 and _numel(r) <= 100:
                 msg = (
                     "Failed to produce expected results! Input lhs tensor was"
-                    f" {l}, rhs tensor was {r}, torch result is {actual}, and reference result is"
-                    f" {expected}."
-                )
+                    " {}, rhs tensor was {}, torch result is {}, and reference result is"
+                    " {}."
+                ).format(l, r, actual, expected)
             else:
                 msg = None
diff --git a/test/test_cpp_extensions_jit.py b/test/test_cpp_extensions_jit.py
index c7ebe5aef74bdb..85b5a410742ff6 100644
--- a/test/test_cpp_extensions_jit.py
+++ b/test/test_cpp_extensions_jit.py
@@ -150,14 +150,17 @@ def _check_cuobjdump_output(expected_values, is_ptx=False):
             err = err.decode("ascii")

             if not p.returncode == 0 or not err == '':
-                raise AssertionError(f"Flags: {flags}\nReturncode: {p.returncode}\nStderr: {err}\n"
-                                     f"Output: {output} ")
+                raise AssertionError("Flags: {}\nReturncode: {}\nStderr: {}\n"
+                                     "Output: {} ".format(flags, p.returncode,
+                                                          err, output))

             actual_arches = sorted(re.findall(r'sm_\d\d', output))
             expected_arches = sorted(['sm_' + xx for xx in expected_values])
             self.assertEqual(actual_arches, expected_arches,
-                             msg=f"Flags: {flags}, Actual: {actual_arches}, Expected: {expected_arches}\n"
-                             f"Stderr: {err}\nOutput: {output}")
+                             msg="Flags: {}, Actual: {}, Expected: {}\n"
+                             "Stderr: {}\nOutput: {}".format(
+                                 flags, actual_arches, expected_arches,
+                                 err, output))

         temp_dir = tempfile.mkdtemp()
         old_envvar = os.environ.get('TORCH_CUDA_ARCH_LIST', None)
diff --git a/test/test_cuda.py b/test/test_cuda.py
index 5f637ac9fb8580..43914649992c51 100644
--- a/test/test_cuda.py
+++ b/test/test_cuda.py
@@ -168,7 +168,7 @@ def test_out_of_memory_retry(self):
     def test_set_per_process_memory_fraction(self):
         # test invalid fraction value.
         with self.assertRaisesRegex(TypeError, "Invalid type"):
-            torch.cuda.set_per_process_memory_fraction(1)
+            torch.cuda.set_per_process_memory_fraction(int(1))
         with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
             torch.cuda.set_per_process_memory_fraction(-0.1)
         with self.assertRaisesRegex(ValueError, "Invalid fraction value"):
@@ -1765,7 +1765,7 @@ def compare(first, second):
             if (output is not None) and (output_method is not None):
                 self.assertTrue(type(output) == type(output_method))
                 comparison = compare(output, output_method)
-                self.assertTrue(comparison, f"torch.{op} result did not match Tensor.{op} result")
+                self.assertTrue(comparison, "torch.{0} result did not match Tensor.{0} result".format(op))

         # Compare numerics to Python-side "autocasting" that (we expect) does the same thing
         # as the C++-side autocasting, and should be bitwise accurate.
diff --git a/test/test_dispatch.py b/test/test_dispatch.py
index 5b03c49c3090d0..cb485bda7af49c 100644
--- a/test/test_dispatch.py
+++ b/test/test_dispatch.py
@@ -152,10 +152,10 @@ def check_invariants(actual_provenance):
                     # NB: this finally test asserts that if a registrations fails,
                    # the dispatcher is left in the same state *that it was before*!
                     check_invariants(
-                        f"running ctors {ctor_order[:i]} and then failing to run ctor {op_ix} "
+                        "running ctors {} and then failing to run ctor {} "
                         "(did this failure leave the dispatcher in a wedged state? "
                         "it shouldn't!)"
-                    )
+                        .format(ctor_order[:i], op_ix))
                     break
                 last_ctor = i
             if expect_raises and len(active_ops) == len(ops):
@@ -165,7 +165,7 @@ def check_invariants(actual_provenance):
                 self.assertTrue(
                     False,
                     "expected exception to be raised, but nothing was raised "
-                    f"(after running ctors {ctor_order})")
+                    "(after running ctors {})".format(ctor_order))
             # In the order specified by dtor_order, run deregistrations
             for i, op_ix in enumerate(dtor_order):
                 # Trigger a destruction
diff --git a/test/test_jit.py b/test/test_jit.py
index cb71f5c888a830..03fc3679e2c3d6 100644
--- a/test/test_jit.py
+++ b/test/test_jit.py
@@ -5810,19 +5810,19 @@ def test_dispatch(op, expects, dtype, binary=False):
                 raise RuntimeError('Unknown dtype')

             if binary:
-                code = f'''
+                code = '''
                     graph(%3 : Tensor, %4 : Tensor):
-                        %2 : {dtype_str}(*, *) = aten::{op}(%3, %4)
-                        %1 : {dtype_str}(*, *) = aten::relu(%2)
+                        %2 : {dtype}(*, *) = aten::{op}(%3, %4)
+                        %1 : {dtype}(*, *) = aten::relu(%2)
                         return (%1)
-                '''
+                '''.format(op=op, dtype=dtype_str)
             else:
-                code = f'''
+                code = '''
                     graph(%3 : Tensor):
-                        %2 : {dtype_str}(*, *) = aten::{op}(%3)
-                        %1 : {dtype_str}(*, *) = aten::relu(%2)
+                        %2 : {dtype}(*, *) = aten::{op}(%3)
+                        %1 : {dtype}(*, *) = aten::relu(%2)
                         return (%1)
-                '''
+                '''.format(op=op, dtype=dtype_str)

             graph = parse_ir(code)
             inputs = (2 if binary else 1) * [torch.rand(26, 2048, dtype=dtype)]
@@ -14936,7 +14936,7 @@ def jit_multihead_attn_forward(query,              # type: Tensor
         value = torch.rand((src_l, bsz, embed_size))

         mask = (torch.triu(torch.ones(src_l, src_l)) == 1).transpose(0, 1)
-        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, 0.0).to(torch.get_default_dtype())
+        mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)).to(torch.get_default_dtype())

         jit_out = jit_multihead_attn_forward(query, key, value,
                                              embed_size, nhead,
diff --git a/test/test_mobile_optimizer.py b/test/test_mobile_optimizer.py
index e672d69ab5dd2d..eeb62ecc4bc444 100644
--- a/test/test_mobile_optimizer.py
+++ b/test/test_mobile_optimizer.py
@@ -582,7 +582,7 @@ def dummy_method_ref_attr_pqr(self):
         self.assertTrue(
             cloned.qualified_name.startswith('__torch__.'),
             ("Expected the cloned module's name to start with the string "
-             f"'__torch__.', but got: {cloned.qualified_name}"),
+             "'__torch__.', but got: {}").format(cloned.qualified_name),
         )
diff --git a/test/test_mps.py b/test/test_mps.py
index e4cdd1dbfa0a17..b2f7d649be50cb 100644
--- a/test/test_mps.py
+++ b/test/test_mps.py
@@ -9436,8 +9436,8 @@ def get_grid(device='cpu', data=None):
                         output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                                align_corners=align_corners)
                         self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,
-                                         msg=f"groundtruth comparison failed for mode={mode}, "
-                                         f"padding_mode={padding_mode}")
+                                         msg="groundtruth comparison failed for mode={}, "
+                                         "padding_mode={}".format(mode, padding_mode))

 class TestAdvancedIndexing(TestCaseMPS):
     supported_dtypes = [torch.float32, torch.float16, torch.int64, torch.int32, torch.int16, torch.uint8]
diff --git a/test/test_nn.py b/test/test_nn.py
index 556e184c99406b..85aa6034600bbb 100644
--- a/test/test_nn.py
+++ b/test/test_nn.py
@@ -5959,8 +5959,8 @@ def get_grid(device='cpu', data=None):
                 output = F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                        align_corners=align_corners)
                 self.assertEqual(output, groundtruth, atol=1e-5, rtol=0,
-                                 msg=f"groundtruth comparison failed for mode={mode}, "
-                                 f"padding_mode={padding_mode}")
+                                 msg="groundtruth comparison failed for mode={}, "
+                                 "padding_mode={}".format(mode, padding_mode))

                 # See NOTE [ grid_sample CPU fallback ]
                 output = torch._grid_sampler_2d_cpu_fallback(
@@ -6047,8 +6047,8 @@ def get_grid(device='cpu', data=None):
                     F.grid_sample(input, grid, mode=mode, padding_mode=padding_mode,
                                   align_corners=align_corners).sum().backward()
                     self.assertEqual(grid.grad, groundtruth, atol=1e-5, rtol=0,
-                                     msg=f"gradient groundtruth comparison failed for mode={mode}, "
-                                     f"padding_mode={padding_mode}, input_requires_grad={input_requires_grad}")
+                                     msg="gradient groundtruth comparison failed for mode={}, "
+                                     "padding_mode={}, input_requires_grad={}".format(mode, padding_mode, input_requires_grad))
                     grid.grad.zero_()

                 # See NOTE [ grid_sample CPU fallback ]
diff --git a/test/test_ops.py b/test/test_ops.py
index a3b14857cae0f4..ab2d1aa599a701 100644
--- a/test/test_ops.py
+++ b/test/test_ops.py
@@ -1098,8 +1098,10 @@ def _test_consistency_helper(samples, variants):
                         RuntimeError,
                         msg=(
                             "inplace variant either incorrectly allowed "
-                            f"resizing or you have marked the sample {sample.summary()}"
-                            " incorrectly with `broadcasts_self=True"
+                            "resizing or you have marked the sample {}"
+                            " incorrectly with `broadcasts_self=True".format(
+                                sample.summary()
+                            )
                         ),
                     ):
                         variant_forward = variant(
diff --git a/test/test_reductions.py b/test/test_reductions.py
index 7ba000f4760235..8cb0a64ce2987d 100644
--- a/test/test_reductions.py
+++ b/test/test_reductions.py
@@ -3503,8 +3503,8 @@ def to_numpy(input):
                 expected = np.asarray(expected)  # transform numpy scalars to numpy.ndarray instances

                 msg = ("Failed to produce expected results! Input tensor was"
-                       f" {t}, torch result is {actual}, and reference result is"
-                       f" {expected}.") if t.numel() < 10 else None
+                       " {}, torch result is {}, and reference result is"
+                       " {}.").format(t, actual, expected) if t.numel() < 10 else None

                 self.assertEqual(actual, expected, msg, exact_dtype=exact_dtype)
diff --git a/test/test_tensor_creation_ops.py b/test/test_tensor_creation_ops.py
index 815e28282c3ba6..cc419adf8d2c24 100644
--- a/test/test_tensor_creation_ops.py
+++ b/test/test_tensor_creation_ops.py
@@ -451,8 +451,9 @@ def dtype_name(dtype):
             other_dtype = torch.float64 if dtype == torch.float32 else torch.float32
             a = torch.tensor([1, 2], device=device, dtype=dtype)
             b = torch.tensor([3, 4], device=device, dtype=other_dtype)
-            error = f"Expected object of scalar type {dtype_name(dtype)} but got scalar type " \
-                    f"{dtype_name(other_dtype)} for second argument"
+            error = "Expected object of scalar type {} but got scalar type " \
+                    "{} for second argument".format(dtype_name(dtype),
+                                                    dtype_name(other_dtype))
             with self.assertRaisesRegex(RuntimeError, error):
                 op(a, b)

@@ -471,8 +472,9 @@ def complex_dtype_name(dtype):
             b = torch.tensor([3, 4], device=device, dtype=dtype)
             out = torch.zeros(2, device=device, dtype=dtype)
             expected_dtype = torch.complex64 if dtype == torch.float32 else torch.complex128
-            error = f"Expected object of scalar type {complex_dtype_name(expected_dtype)} but got scalar type " \
-                    f"{dtype_name(dtype)} for argument 'out'"
+            error = "Expected object of scalar type {} but got scalar type " \
+                    "{} for argument 'out'".format(
+                        complex_dtype_name(expected_dtype), dtype_name(dtype))
             with self.assertRaisesRegex(RuntimeError, error):
                 op(a, b, out=out)
diff --git a/test/test_torch.py b/test/test_torch.py
index 2d0e02de141c26..ab1a92bdb43771 100644
--- a/test/test_torch.py
+++ b/test/test_torch.py
@@ -8682,8 +8682,8 @@ def test_doc_template(self) -> None:
         for common_args in [multi_dim_common, single_dim_common, factory_common_args, factory_like_common_args]:
             for k, v in common_args.items():
-                self.assertNotIn(v, desc, f'The argument description "{v}" in {func} can be '
-                                          f'replaced by {{{k}}}')
+                self.assertNotIn(v, desc, 'The argument description "{}" in {} can be '
+                                          'replaced by {{{}}}'.format(v, func, k))

     def test_doc(self):
         checked_types = (types.MethodType, types.FunctionType,
@@ -8719,8 +8719,8 @@ def _test_namespace(ns, *skips):
                 full_name = ns_name + '.' + name
                 if any(r.match(name) for r in skip_regexes):
                     self.assertFalse(has_doc,
-                                     f'New docs have been added for {full_name}, please remove '
-                                     'it from the skipped list in TestTorch.test_doc')
+                                     'New docs have been added for {}, please remove '
+                                     'it from the skipped list in TestTorch.test_doc'.format(full_name))
                 else:
                     self.assertTrue(has_doc, f'{full_name} is missing documentation')
diff --git a/test/test_transformers.py b/test/test_transformers.py
index 8a57af902c6e66..52eabc0b9a0552 100644
--- a/test/test_transformers.py
+++ b/test/test_transformers.py
@@ -146,7 +146,7 @@ def test_self_attn_TxT_attn_mask(self, device):
         query = torch.rand(batch_size, tgt_len, embed_dim, device=device)  # [N, T, D]

         attn_mask = torch.randint(0, 2, (tgt_len, tgt_len)).cuda().float()  # [T, T]
-        attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, 0.0)
+        attn_mask = attn_mask.masked_fill(attn_mask == 0, float('-inf')).masked_fill(attn_mask == 1, float(0.0))

         attn_mask_4d = attn_mask.expand(batch_size, num_heads, tgt_len, tgt_len)
diff --git a/test/test_unary_ufuncs.py b/test/test_unary_ufuncs.py
index eb6e270cde57d9..6c6744b6b781e6 100644
--- a/test/test_unary_ufuncs.py
+++ b/test/test_unary_ufuncs.py
@@ -101,9 +101,9 @@ def test_float_domains(self, device, dtype, op):
                     result.item(),
                     float("nan"),
                     msg=(
-                        f"input of {lower_tensor.item()} outside lower domain boundary"
-                        f" {low} produced {result.item()}, not nan!"
-                    ),
+                        "input of {} outside lower domain boundary"
+                        " {} produced {}, not nan!"
+                    ).format(lower_tensor.item(), low, result.item()),
                 )

             if high is not None:
@@ -120,9 +120,9 @@ def test_float_domains(self, device, dtype, op):
                     result.item(),
                     float("nan"),
                     msg=(
-                        f"input of {higher_tensor.item()} outside upper domain boundary"
-                        f" {high} produced {result.item()}, not nan!"
-                    ),
+                        "input of {} outside upper domain boundary"
+                        " {} produced {}, not nan!"
+                    ).format(higher_tensor.item(), high, result.item()),
                 )

     # Helper for comparing torch tensors and numpy arrays
@@ -245,9 +245,9 @@ def _helper_reference_numerics(
             if t.numel() < 10:
                 msg = (
                     "Failed to produce expected results! Input tensor was"
-                    f" {t}, torch result is {actual}, and reference result is"
-                    f" {expected}."
-                )
+                    " {}, torch result is {}, and reference result is"
+                    " {}."
+                ).format(t, actual, expected)
             else:
                 msg = None
diff --git a/test/torch_np/numpy_tests/core/test_multiarray.py b/test/torch_np/numpy_tests/core/test_multiarray.py
index 5f48d1c848ed1e..65bd6edb353b35 100644
--- a/test/torch_np/numpy_tests/core/test_multiarray.py
+++ b/test/torch_np/numpy_tests/core/test_multiarray.py
@@ -6431,7 +6431,7 @@ def test_exotic_2(self):
         e = float("-Infinity")
         assert_equal(np.where(True, d, e).dtype, np.float32)
         # also check upcast
-        e = 1e150
+        e = float(1e150)
         assert_equal(np.where(True, d, e).dtype, np.float64)

     def test_ndim(self):
diff --git a/test/torch_np/numpy_tests/core/test_scalarmath.py b/test/torch_np/numpy_tests/core/test_scalarmath.py
index 6e9f70392f5d78..6819dc640d01a6 100644
--- a/test/torch_np/numpy_tests/core/test_scalarmath.py
+++ b/test/torch_np/numpy_tests/core/test_scalarmath.py
@@ -264,7 +264,9 @@ def test_mixed_types(self):
                 a = t1(3)
                 b = t2(2)
                 result = a**b
-                msg = f"error with {t1!r} and {t2!r}:" f"got {result!r}, expected {9!r}"
+                msg = ("error with {!r} and {!r}:" "got {!r}, expected {!r}").format(
+                    t1, t2, result, 9
+                )
                 if np.issubdtype(np.dtype(result), np.integer):
                     assert_(result == 9, msg)
                 else:
diff --git a/test/torch_np/numpy_tests/lib/test_histograms.py b/test/torch_np/numpy_tests/lib/test_histograms.py
index 156a61b2d0a1e0..e3c0d5cbbff6f0 100644
--- a/test/torch_np/numpy_tests/lib/test_histograms.py
+++ b/test/torch_np/numpy_tests/lib/test_histograms.py
@@ -485,8 +485,8 @@ def test_simple(self):
                 assert_equal(
                     len(a),
                     numbins,
-                    err_msg=f"For the {estimator} estimator "
-                    f"with datasize of {testlen}",
+                    err_msg="For the {} estimator "
+                    "with datasize of {}".format(estimator, testlen),
                 )

     def test_small(self):
@@ -532,8 +532,8 @@ def test_small(self):
                 assert_equal(
                     len(a),
                     expbins,
-                    err_msg=f"For the {estimator} estimator "
-                    f"with datasize of {testlen}",
+                    err_msg="For the {} estimator "
+                    "with datasize of {}".format(estimator, testlen),
                 )

     def test_incorrect_methods(self):
@@ -566,7 +566,7 @@ def test_novariance(self):
             assert_equal(
                 len(a),
                 numbins,
-                err_msg=f"{estimator} estimator, " "No Variance test",
+                err_msg="{} estimator, " "No Variance test".format(estimator),
             )

     def test_limited_variance(self):
diff --git a/tools/pyi/gen_pyi.py b/tools/pyi/gen_pyi.py
index 1f01d96ad5ccfa..d255852be11215 100644
--- a/tools/pyi/gen_pyi.py
+++ b/tools/pyi/gen_pyi.py
@@ -462,7 +462,7 @@ def gen_nn_functional(fm: FileManager) -> None:
         "pdist",
         "cosine_similarity",
     ]
-    imported_hints = [f"from .. import {_} as {_}" for _ in torch_imports]
+    imported_hints = ["from .. import {0} as {0}".format(_) for _ in torch_imports]

     # Functions imported into `torch.nn.functional` from `torch._C._nn`
     c_nn_imports = [
@@ -479,7 +479,9 @@ def gen_nn_functional(fm: FileManager) -> None:
         "one_hot",
         "scaled_dot_product_attention",
     ]
-    imported_hints += [f"from .._C._nn import {_} as {_}" for _ in c_nn_imports]
+    imported_hints += [
+        "from .._C._nn import {0} as {0}".format(_) for _ in c_nn_imports
+    ]

     # This is from `torch._C._nn` but renamed
     imported_hints.append("from .._C._nn import log_sigmoid\nlogsigmoid = log_sigmoid")
@@ -873,13 +875,15 @@ def gen_pyi(
     )
     for binop in ["mul", "true_divide", "floor_divide"]:
         unsorted_function_hints[binop].append(
-            f"def {binop}(input: Union[Tensor, Number], other: Union[Tensor, Number], "
-            "*, out: Optional[Tensor] = None) -> Tensor: ..."
+            "def {}(input: Union[Tensor, Number], other: Union[Tensor, Number], "
+            "*, out: Optional[Tensor] = None) -> Tensor: ...".format(binop)
         )
     for binop in ["add", "sub"]:
         unsorted_function_hints[binop].append(
-            f"def {binop}(input: Union[Tensor, Number], other: Union[Tensor, Number], "
-            "*, alpha: Optional[Number] = 1, out: Optional[Tensor] = None) -> Tensor: ..."
+            "def {}(input: Union[Tensor, Number], other: Union[Tensor, Number], "
+            "*, alpha: Optional[Number] = 1, out: Optional[Tensor] = None) -> Tensor: ...".format(
+                binop
+            )
         )

     native_functions = parse_native_yaml(
@@ -1082,8 +1086,8 @@ def replace_special_case(hint: str) -> str:
                 binop += "_"
                 out_suffix = ""
             unsorted_tensor_method_hints[binop].append(
-                f"def {binop}(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]{out_suffix})"
-                " -> Tensor: ..."
+                "def {}(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat]{})"
+                " -> Tensor: ...".format(binop, out_suffix)
             )
     for binop in ["add", "sub"]:
         for inplace in [False, True]:
@@ -1092,9 +1096,9 @@ def replace_special_case(hint: str) -> str:
                 binop += "_"
                 out_suffix = ""
             unsorted_tensor_method_hints[binop].append(
-                f"def {binop}(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], "
-                f"*, alpha: Optional[Number] = 1{out_suffix})"
-                " -> Tensor: ..."
+                "def {}(self, other: Union[Tensor, Number, torch.SymInt, torch.SymFloat], "
+                "*, alpha: Optional[Number] = 1{})"
+                " -> Tensor: ...".format(binop, out_suffix)
             )
     simple_conversions = [
         "byte",
diff --git a/torch/__init__.py b/torch/__init__.py
index 55bb56033376ea..f3caf80fd081b9 100644
--- a/torch/__init__.py
+++ b/torch/__init__.py
@@ -1733,8 +1733,8 @@ def _register_device_module(device_type, module):
     device_type = torch.device(device_type).type
     m = sys.modules[__name__]
     if hasattr(m, device_type):
-        raise RuntimeError(f"The runtime module of '{device_type}' has already "
-                           f"been registered with '{getattr(m, device_type)}'")
+        raise RuntimeError("The runtime module of '{}' has already "
+                           "been registered with '{}'".format(device_type, getattr(m, device_type)))
     setattr(m, device_type, module)
     torch_module_name = '.'.join([__name__, device_type])
     sys.modules[torch_module_name] = module
diff --git a/torch/_lobpcg.py b/torch/_lobpcg.py
index a5ed5cf8fcfd26..16f37cdf6a4a8e 100644
--- a/torch/_lobpcg.py
+++ b/torch/_lobpcg.py
@@ -614,8 +614,8 @@ def _lobpcg(

     if m < 3 * n:
         raise ValueError(
-            f"LPBPCG algorithm is not applicable when the number of A rows (={m})"
-            f" is smaller than 3 x the number of requested eigenpairs (={n})"
+            "LPBPCG algorithm is not applicable when the number of A rows (={})"
+            " is smaller than 3 x the number of requested eigenpairs (={})".format(m, n)
         )

     method = "ortho" if method is None else method
@@ -1151,7 +1151,9 @@ def _get_ortho(self, U, V):
                 assert B is not None
                 raise ValueError(
                     "Overdetermined shape of U:"
-                    f" #B-cols(={B.shape[-1]}) >= #U-cols(={U.shape[-1]}) + #V-cols(={V.shape[-1]}) must hold"
+                    " #B-cols(={}) >= #U-cols(={}) + #V-cols(={}) must hold".format(
+                        B.shape[-1], U.shape[-1], V.shape[-1]
+                    )
                 )
         self.ivars["ortho_i"] = i
         self.ivars["ortho_j"] = j
diff --git a/torch/_namedtensor_internals.py b/torch/_namedtensor_internals.py
index cbc9de2de091d0..47bdcd82d14ab7 100644
--- a/torch/_namedtensor_internals.py
+++ b/torch/_namedtensor_internals.py
@@ -50,8 +50,8 @@ def single_ellipsis_index(names, fn_name):
     ellipsis_indices = [i for i, name in enumerate(names) if is_ellipsis(name)]
     if len(ellipsis_indices) >= 2:
         raise RuntimeError(
-            f"{fn_name}: More than one Ellipsis ('...') found in names ("
-            f"{names}). This function supports up to one Ellipsis."
+            "{}: More than one Ellipsis ('...') found in names ("
+            "{}). This function supports up to one Ellipsis.".format(fn_name, names)
         )
     if len(ellipsis_indices) == 1:
         return ellipsis_indices[0]
@@ -97,8 +97,15 @@ def update_names_with_mapping(tensor, rename_map, inplace):
             dim_map[old_dim] = new_dim
         else:
             raise RuntimeError(
-                f"{namer_api_name(inplace)}: Tried to rename dim '{old_dim}' to dim "
-                f"{new_dim} in Tensor[{tensor.names}] but dim '{old_dim}' does not exist"
+                (
+                    "{api_name}: Tried to rename dim '{old_dim}' to dim "
+                    "{new_dim} in Tensor[{dims}] but dim '{old_dim}' does not exist"
+                ).format(
+                    old_dim=old_dim,
+                    new_dim=new_dim,
+                    dims=tensor.names,
+                    api_name=namer_api_name(inplace),
+                )
             )
     return tensor._update_names(tuple(dim_map.values()), inplace)

@@ -142,10 +149,10 @@ def update_names(tensor, names, rename_map, inplace):
     has_rename_pairs = bool(rename_map)
     if has_names and has_rename_pairs:
         raise RuntimeError(
-            f"{namer_api_name(inplace)}: This function takes either positional "
-            f"args or keyword args, but not both. Use tensor.{namer_api_name(inplace)}(*names) "
-            f"to name dims and tensor.{namer_api_name(inplace)}(**rename_map) to rename "
-            "dims."
+            "{api_name}: This function takes either positional "
+            "args or keyword args, but not both. Use tensor.{api_name}(*names) "
+            "to name dims and tensor.{api_name}(**rename_map) to rename "
+            "dims.".format(api_name=namer_api_name(inplace))
         )

     # Special case for tensor.rename(*[]), which is valid for a 0 dim tensor.
diff --git a/torch/_numpy/testing/utils.py b/torch/_numpy/testing/utils.py
index 03f0e99b93d470..d48809dc14653c 100644
--- a/torch/_numpy/testing/utils.py
+++ b/torch/_numpy/testing/utils.py
@@ -1410,8 +1410,8 @@ def assert_array_max_ulp(a, b, maxulp=1, dtype=None):
     ret = nulp_diff(a, b, dtype)
     if not np.all(ret <= maxulp):
         raise AssertionError(
-            f"Arrays are not almost equal up to {maxulp:g} "
-            f"ULP (max difference is {np.max(ret):g} ULP)"
+            "Arrays are not almost equal up to {:g} "
+            "ULP (max difference is {:g} ULP)".format(maxulp, np.max(ret))
         )
     return ret
diff --git a/torch/_ops.py b/torch/_ops.py
index 19d7df3aab237b..329d8fa534a950 100644
--- a/torch/_ops.py
+++ b/torch/_ops.py
@@ -646,8 +646,10 @@ def __getattr__(self, key):
             # an object name different from the one the attribute
             # query was performed on.
             raise AttributeError(
-                f"'{str(self)}' can't have an overload name beginning with '__' and the "
-                f"underlying op {str(self._op)} has no attribute {key} either."
+                "'{}' can't have an overload name beginning with '__' and the "
+                "underlying op {} has no attribute {} either.".format(
+                    str(self), str(self._op), key
+                )
             ) from None

         try:
diff --git a/torch/_prims/__init__.py b/torch/_prims/__init__.py
index 8b1a58eef78648..0e7d06a1e5bd43 100644
--- a/torch/_prims/__init__.py
+++ b/torch/_prims/__init__.py
@@ -1483,8 +1483,10 @@ def _slice_meta(
             raise ValueError(msg)
         if x > y:
             msg = (
-                f"Attempting to slice a tensor but a start index in {start_indices} is greater than"
-                f" the length of its corresponding dimension in shape {a.shape}"
+                "Attempting to slice a tensor but a start index in {} is greater than"
+                " the length of its corresponding dimension in shape {}".format(
+                    start_indices, a.shape
+                )
             )
             raise ValueError(msg)

@@ -1494,14 +1496,16 @@ def _slice_meta(
             raise ValueError(msg)
         if x > y:
             msg = (
-                f"Attempting to slice a tensor but a stop index in {limit_indices} is greater than the length of "
-                f" its corresponding dimension in shape {a.shape}"
+                "Attempting to slice a tensor but a stop index in {} is greater than the length of "
+                " its corresponding dimension in shape {}".format(
+                    limit_indices, a.shape
+                )
             )
             raise ValueError(msg)

         if x < z:
             msg = (
-                f"Attempting to slice a tensor but a start index in {x} is greater than "
-                f" its corresponding stop index {z}"
+                "Attempting to slice a tensor but a start index in {} is greater than "
+                " its corresponding stop index {}".format(x, z)
             )

     for x in _strides:
diff --git a/torch/_refs/nn/functional/__init__.py b/torch/_refs/nn/functional/__init__.py
index 8eafe93e635060..a6427977474036 100644
--- a/torch/_refs/nn/functional/__init__.py
+++ b/torch/_refs/nn/functional/__init__.py
@@ -605,7 +605,9 @@ def margin_ranking_loss(
     if input1.ndim != input2.ndim or input1.ndim != target.ndim:
         raise RuntimeError(
             "margin_ranking_loss : All input tensors should have same dimension but got sizes: "
-            f"input1: {input1.shape}, input2: {input2.shape}, target: {target.shape} "
+            "input1: {}, input2: {}, target: {} ".format(
+                input1.shape, input2.shape, target.shape
+            )
         )
     _check_reduction_value(reduction)
     loss = torch.clamp_min(-target * (input1 - input2) + margin, 0)
diff --git a/torch/ao/nn/quantizable/modules/rnn.py b/torch/ao/nn/quantizable/modules/rnn.py
index 882c63d699dcf8..b4de0fd1ed9ca6 100644
--- a/torch/ao/nn/quantizable/modules/rnn.py
+++ b/torch/ao/nn/quantizable/modules/rnn.py
@@ -318,8 +318,8 @@ def __init__(self, input_size: int, hidden_size: int,
             if num_layers == 1:
                 warnings.warn("dropout option adds dropout after all but last "
                               "recurrent layer, so non-zero dropout expects "
-                              f"num_layers greater than 1, but got dropout={dropout} "
-                              f"and num_layers={num_layers}")
+                              "num_layers greater than 1, but got dropout={} "
+                              "and num_layers={}".format(dropout, num_layers))

         layers = [_LSTMLayer(self.input_size, self.hidden_size,
                              self.bias, batch_first=False,
diff --git a/torch/ao/nn/quantized/dynamic/modules/linear.py b/torch/ao/nn/quantized/dynamic/modules/linear.py
index a8a366e57f53cd..22f483f32fd7a8 100644
--- a/torch/ao/nn/quantized/dynamic/modules/linear.py
+++ b/torch/ao/nn/quantized/dynamic/modules/linear.py
@@ -105,7 +105,7 @@ def from_float(cls, mod):
             weight_observer = default_dynamic_qconfig.weight()
         dtype = weight_observer.dtype
         assert dtype in [torch.qint8, torch.float16], "The only supported dtypes for " \
-            f"dynamic quantized linear are qint8 and float16 got: {dtype}"
+            "dynamic quantized linear are qint8 and float16 got: {}".format(dtype)
         weight_observer(mod.weight)
         if dtype == torch.qint8:
             qweight = _quantize_weight(mod.weight.float(), weight_observer)
diff --git a/torch/ao/nn/quantized/dynamic/modules/rnn.py b/torch/ao/nn/quantized/dynamic/modules/rnn.py
index f8c68c841ef290..af50d51bb37099 100644
--- a/torch/ao/nn/quantized/dynamic/modules/rnn.py
+++ b/torch/ao/nn/quantized/dynamic/modules/rnn.py
@@ -93,8 +93,8 @@ def __init__(self, mode, input_size, hidden_size,
         if dropout > 0 and num_layers == 1:  # type: ignore[operator]
             warnings.warn("dropout option adds dropout after all but last "
                           "recurrent layer, so non-zero dropout expects "
-                          f"num_layers greater than 1, but got dropout={dropout} and "
-                          f"num_layers={num_layers}")
+                          "num_layers greater than 1, but got dropout={} and "
+                          "num_layers={}".format(dropout, num_layers))

         if mode == 'LSTM':
             gate_size = 4 * hidden_size
diff --git a/torch/ao/quantization/fx/convert.py b/torch/ao/quantization/fx/convert.py
index 917735529b7dde..a85036f51be1d4 100644
--- a/torch/ao/quantization/fx/convert.py
+++ b/torch/ao/quantization/fx/convert.py
@@ -977,8 +977,8 @@ def convert(
                 assert k in convert_node_name_to_qconfig, f'Expected key {k} in convert node_name_to_qconfig'
                 if convert_node_name_to_qconfig[k] is not None:
                     assert qconfig_equals(v, convert_node_name_to_qconfig[k]), \
-                        f"Expected k {k} to have the same value in prepare and convert QConfigMappings, " \
-                        f"but {v} was updated to {convert_node_name_to_qconfig[k]}"
+                        "Expected k {} to have the same value in prepare and convert QConfigMappings, " \
+                        "but {} was updated to {}".format(k, v, convert_node_name_to_qconfig[k])
             node_name_to_qconfig = convert_node_name_to_qconfig

     custom_module_classes = get_custom_module_class_keys(convert_custom_config.observed_to_quantized_mapping)
diff --git a/torch/ao/quantization/fx/prepare.py b/torch/ao/quantization/fx/prepare.py
index abdfba6ba1f6e0..12a29ebbb86e85 100644
--- a/torch/ao/quantization/fx/prepare.py
+++ b/torch/ao/quantization/fx/prepare.py
@@ -189,7 +189,7 @@ def _create_obs_or_fq_from_qspec(
         edge_or_node = quantization_spec.edge_or_node
         assert edge_or_node in obs_or_fq_map, \
             "please make sure only refer to edge or node that has " \
-            f"observer/fake_quant inserted: '{edge_or_node}' not in\n{obs_or_fq_map.keys()}"
+            "observer/fake_quant inserted: '{}' not in\n{}".format(edge_or_node, obs_or_fq_map.keys())
         return obs_or_fq_map[edge_or_node]
     elif isinstance(quantization_spec, DerivedQuantizationSpec):
         # can't use asdict, so not calling get_observer_kwargs here
diff --git a/torch/ao/quantization/pt2e/qat_utils.py b/torch/ao/quantization/pt2e/qat_utils.py
index ee4982942d93fb..837e19b16028f3 100644
--- a/torch/ao/quantization/pt2e/qat_utils.py
+++ b/torch/ao/quantization/pt2e/qat_utils.py
@@ -226,10 +226,10 @@ def _quantized_qat_conv2d_bn_pattern(
         )
     else:
         scaled_weight = torch.ops.quantized_decomposed.quantize_per_tensor(
-            scaled_weight, 1.0, 0, weight_quant_min, weight_quant_max, torch.int8,
+            scaled_weight, 1.0, int(0), weight_quant_min, weight_quant_max, torch.int8,
        )
        scaled_weight = torch.ops.quantized_decomposed.dequantize_per_tensor(
-            scaled_weight, 1.0, 0, weight_quant_min, weight_quant_max, torch.int8,
+            scaled_weight, 1.0, int(0), weight_quant_min, weight_quant_max, torch.int8,
        )
     if has_bias:
         zero_bias = torch.zeros_like(kwargs["conv_bias"], dtype=x.dtype)
@@ -283,10 +283,10 @@ def _folded_quantized_qat_conv2d_bn_pattern(
         )
     else:
         conv_weight = torch.ops.quantized_decomposed.quantize_per_tensor(
-            conv_weight, 1.0, 0, weight_quant_min, weight_quant_max, torch.int8,
+            conv_weight, 1.0, int(0), weight_quant_min, weight_quant_max, torch.int8,
        )
        conv_weight = torch.ops.quantized_decomposed.dequantize_per_tensor(
-            conv_weight, 1.0, 0, weight_quant_min, weight_quant_max, torch.int8,
+            conv_weight, 1.0, int(0), weight_quant_min, weight_quant_max, torch.int8,
        )
     if has_bias:
         x = F.conv2d(x, conv_weight, kwargs["conv_bias"])
diff --git a/torch/ao/quantization/utils.py b/torch/ao/quantization/utils.py
index 308fce44a40c6f..154372ef9a8597 100644
--- a/torch/ao/quantization/utils.py
+++ b/torch/ao/quantization/utils.py
@@ -201,7 +201,7 @@ def get_swapped_custom_module_class(custom_module, custom_module_class_mapping,
     quant_type = get_quant_type(qconfig)
     class_mapping = custom_module_class_mapping.get(quant_type, {})
     assert type(custom_module) in class_mapping, "did not find corresponding observed " \
-        f"module class for {type(custom_module)} in mapping: {class_mapping}"
+        "module class for {} in mapping: {}".format(type(custom_module), class_mapping)
     return class_mapping[type(custom_module)]

 def activation_dtype(qconfig):
@@ -298,8 +298,8 @@ def get_quant_type(qconfig):
         elif activation.dtype == torch.float16:
             return QuantType.STATIC

-    raise Exception(f"Unrecognized dtype combination in get_quant_type: activation({activation.dtype}),"
-                    f"weight({weight.dtype})")
+    raise Exception("Unrecognized dtype combination in get_quant_type: activation({}),"
+                    "weight({})".format(activation.dtype, weight.dtype))

 def check_min_max_valid(min_val: torch.Tensor, max_val: torch.Tensor) -> bool:
     """ Checks if the given minimum and maximum values are valid, meaning that
diff --git a/torch/autograd/functional.py b/torch/autograd/functional.py
index 30045bc8671cf5..755494a88ade4c 100644
--- a/torch/autograd/functional.py
+++ b/torch/autograd/functional.py
@@ -33,13 +33,17 @@ def _as_tuple(inp, arg_name=None, fn_name=None):
         if not isinstance(el, torch.Tensor):
             if is_inp_tuple:
                 raise TypeError(
-                    f"The {arg_name} given to {fn_name} must be either a Tensor or a tuple of Tensors but the"
-                    f" value at index {i} has type {type(el)}."
+                    "The {} given to {} must be either a Tensor or a tuple of Tensors but the"
+                    " value at index {} has type {}.".format(
+                        arg_name, fn_name, i, type(el)
+                    )
                 )
             else:
                 raise TypeError(
-                    f"The {arg_name} given to {fn_name} must be either a Tensor or a tuple of Tensors but the"
-                    f" given {arg_name} has type {type(el)}."
+                    "The {} given to {} must be either a Tensor or a tuple of Tensors but the"
+                    " given {} has type {}.".format(
+                        arg_name, fn_name, arg_name, type(el)
+                    )
                 )

     return is_inp_tuple, inp
@@ -130,35 +134,37 @@ def _check_requires_grad(inputs, input_type, strict):
         if inp is None:
             # This can only be reached for grad_inputs.
             raise RuntimeError(
-                f"The output of the user-provided function is independent of input {i}."
-                " This is not allowed in strict mode."
+                "The output of the user-provided function is independent of input {}."
+                " This is not allowed in strict mode.".format(i)
             )
         if not inp.requires_grad:
             if input_type == "hessian":
                 raise RuntimeError(
-                    f"The hessian of the user-provided function with respect to input {i}"
+                    "The hessian of the user-provided function with respect to input {}"
                     " is independent of the input. This is not allowed in strict mode."
                     " You should ensure that your function is thrice differentiable and that"
-                    " the hessian depends on the inputs."
+                    " the hessian depends on the inputs.".format(i)
                 )
             elif input_type == "jacobian":
                 raise RuntimeError(
                     "While computing the hessian, found that the jacobian of the user-provided"
-                    f" function with respect to input {i} is independent of the input. This is not"
+                    " function with respect to input {} is independent of the input. This is not"
                     " allowed in strict mode. You should ensure that your function is twice"
                     " differentiable and that the jacobian depends on the inputs (this would be"
-                    " violated by a linear function for example)."
+                    " violated by a linear function for example).".format(i)
                 )
             elif input_type == "grad_inputs":
                 raise RuntimeError(
-                    f"The gradient with respect to input {i} is independent of the inputs of the"
-                    " user-provided function. This is not allowed in strict mode."
+                    "The gradient with respect to input {} is independent of the inputs of the"
+                    " user-provided function. This is not allowed in strict mode.".format(
+                        i
+                    )
                 )
             else:
                 raise RuntimeError(
-                    f"Output {i} of the user-provided function does not require gradients."
+                    "Output {} of the user-provided function does not require gradients."
                     " The outputs must be computed in a differentiable manner from the input"
-                    " when running in strict mode."
+                    " when running in strict mode.".format(i)
                 )
@@ -215,25 +221,27 @@ def _fill_in_zeros(grads, refs, strict, create_graph, stage):
                 if stage == "back":
                     raise RuntimeError(
                         "The output of the user-provided function is independent of "
-                        f"input {i}. This is not allowed in strict mode."
+                        "input {}. This is not allowed in strict mode.".format(i)
                     )
                 elif stage == "back_trick":
                     raise RuntimeError(
-                        f"The gradient with respect to the input is independent of entry {i}"
+                        "The gradient with respect to the input is independent of entry {}"
                         " in the grad_outputs when using the double backward trick to compute"
-                        " forward mode gradients. This is not allowed in strict mode."
+                        " forward mode gradients. This is not allowed in strict mode.".format(
+                            i
+                        )
                     )
                 elif stage == "double_back":
                     raise RuntimeError(
                         "The jacobian of the user-provided function is independent of "
-                        f"input {i}. This is not allowed in strict mode."
+                        "input {}. This is not allowed in strict mode.".format(i)
                     )
                 else:
                     raise RuntimeError(
                         "The hessian of the user-provided function is independent of "
-                        f"entry {i} in the grad_jacobian. This is not allowed in strict "
+                        "entry {} in the grad_jacobian. This is not allowed in strict "
                         "mode as it prevents from using the double backward trick to "
-                        "replace forward mode AD."
+                        "replace forward mode AD.".format(i)
                     )

             grads_i = torch.zeros_like(refs[i])
@@ -242,12 +250,16 @@ def _fill_in_zeros(grads, refs, strict, create_graph, stage):
             if "double" not in stage:
                 raise RuntimeError(
                     "The jacobian of the user-provided function is independent of "
-                    f"input {i}. This is not allowed in strict mode when create_graph=True."
+                    "input {}. This is not allowed in strict mode when create_graph=True.".format(
+                        i
+                    )
                 )
             else:
                 raise RuntimeError(
                     "The hessian of the user-provided function is independent of "
-                    f"input {i}. This is not allowed in strict mode when create_graph=True."
+                    "input {}. This is not allowed in strict mode when create_graph=True.".format(
+                        i
+                    )
                 )

         res += (grads_i,)
@@ -799,17 +811,17 @@ def vjp(grad_output):
                 if strict and create_graph and not vj_el.requires_grad:
                     msg = (
                         "The jacobian of the user-provided function is "
-                        f"independent of input {i}. This is not allowed in "
-                        "strict mode when create_graph=True."
+                        "independent of input {}. This is not allowed in "
+                        "strict mode when create_graph=True.".format(i)
                     )
                     raise RuntimeError(msg)
                 jac_i_el.append(vj_el)
             else:
                 if strict:
                     msg = (
-                        f"Output {i} of the user-provided function is "
-                        f"independent of input {el_idx}. This is not allowed in "
-                        "strict mode."
+                        "Output {} of the user-provided function is "
+                        "independent of input {}. This is not allowed in "
+                        "strict mode.".format(i, el_idx)
                     )
                     raise RuntimeError(msg)
                 jac_i_el.append(torch.zeros_like(inp_el))
diff --git a/torch/autograd/profiler_util.py b/torch/autograd/profiler_util.py
index de330f10a4fe21..a6944d6a4840e5 100644
--- a/torch/autograd/profiler_util.py
+++ b/torch/autograd/profiler_util.py
@@ -246,14 +246,20 @@ def export_chrome_trace(self, path):
                     # 's' and 'f' draw Flow arrows from
                     # the CPU launch to the GPU kernel
                     f.write(
-                        f'{{"name": "{evt.trace_name}", '
+                        '{{"name": "{}", '
                         '"ph": "s", '
-                        f'"ts": {evt.time_range.start}, '
-                        f'"tid": {evt.thread}, '
+                        '"ts": {}, '
+                        '"tid": {}, '
                         '"pid": "CPU functions", '
-                        f'"id": {next_id}, '
-                        f'"cat": "cpu_to_{device_name}", '
-                        '"args": {{}}}}, '
+                        '"id": {}, '
+                        '"cat": "cpu_to_{}", '
+                        '"args": {{}}}}, '.format(
+                            evt.trace_name,
+                            evt.time_range.start,
+                            evt.thread,
+                            next_id,
+                            device_name,
+                        )
                     )
                     # Note: use torch.profiler to get device kernel trace
                     next_id += 1
diff --git a/torch/backends/cuda/__init__.py b/torch/backends/cuda/__init__.py
index 54ecd2681387a7..ab63b15f32e074 100644
--- a/torch/backends/cuda/__init__.py
+++ b/torch/backends/cuda/__init__.py
@@ -94,8 +94,10 @@ def __getitem__(self, device):
         index = torch.cuda._utils._get_device_index(device)
         if index < 0 or index >= torch.cuda.device_count():
             raise RuntimeError(
-                f"cufft_plan_cache: expected 0 <= device index < {torch.cuda.device_count()}, but got "
-                f"device with index {index}"
+                (
+                    "cufft_plan_cache: expected 0 <= device index < {}, but got "
+                    "device with index {}"
+                ).format(torch.cuda.device_count(), index)
             )
         if len(self.caches) == 0:
             self.caches.extend(
diff --git a/torch/distributed/distributed_c10d.py b/torch/distributed/distributed_c10d.py
index 59e5a71f76c7fa..8269caec4421fa 100644
--- a/torch/distributed/distributed_c10d.py
+++ b/torch/distributed/distributed_c10d.py
@@ -1123,9 +1123,9 @@ def init_process_group(
     if backend == Backend.MPI:
         if world_size != -1 or rank != -1:
             warnings.warn(
-                f"For MPI backend, world_size ({world_size}) and rank ({rank}) "
+                "For MPI backend, world_size ({}) and rank ({}) "
                 "are ignored since they are assigned by the "
-                "MPI runtime."
+                "MPI runtime.".format(world_size, rank)
             )

         default_pg, _ = _new_process_group_helper(
diff --git a/torch/distributed/nn/api/remote_module.py b/torch/distributed/nn/api/remote_module.py
index 935e27436ea28d..75d7e6215d5620 100644
--- a/torch/distributed/nn/api/remote_module.py
+++ b/torch/distributed/nn/api/remote_module.py
@@ -500,8 +500,8 @@ def _check_attribute_picklability(self):
                 and k not in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING
             ):
                 raise AttributeError(
-                    f"Attribute {k} must be either in ``_REMOTE_MODULE_PICKLED_ATTRIBUTES`` or "
-                    "``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``."
+                    "Attribute {} must be either in ``_REMOTE_MODULE_PICKLED_ATTRIBUTES`` or "
+                    "``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.".format(k)
                 )

     def _install_generated_methods(self):
@@ -729,9 +729,11 @@ def _remote_module_reducer(remote_module):
         # Check if unpickled attributes are all in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING.
         elif k not in _REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING:
             print(
-                f"The new attribute ``{k}`` of RemoteModule is ignored during RPC pickling. "
+                "The new attribute ``{}`` of RemoteModule is ignored during RPC pickling. "
                 "To pickle this attribute, please add it to ``_REMOTE_MODULE_PICKLED_ATTRIBUTES``. "
-                "Otherwise, please explicitly add it to ``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.",
+                "Otherwise, please explicitly add it to ``_REMOTE_MODULE_ATTRIBUTES_IGNORE_FOR_PICKLING``.".format(
+                    k
+                ),
                 file=sys.stderr,
             )
diff --git a/torch/distributed/pipeline/sync/pipe.py b/torch/distributed/pipeline/sync/pipe.py
index 65063e9b1c8246..707f56c9d2eee9 100644
--- a/torch/distributed/pipeline/sync/pipe.py
+++ b/torch/distributed/pipeline/sync/pipe.py
@@ -112,8 +112,8 @@ def _retrieve_device(module: nn.Module) -> torch.device:
             device = parameter.device
         elif device != parameter.device:
             raise ValueError(
-                f'nn.Module: {module}, should have all parameters on a single device,'
-                ' please use .to() to place the module on a single device')
+                'nn.Module: {}, should have all parameters on a single device,'
+                ' please use .to() to place the module on a single device'.format(module))

     return device if device is not None else torch.device("cpu")
diff --git a/torch/distributions/independent.py b/torch/distributions/independent.py
index 35b705fd0f29c7..a58e81b7562e4b 100644
--- a/torch/distributions/independent.py
+++ b/torch/distributions/independent.py
@@ -45,7 +45,9 @@ def __init__(
         if reinterpreted_batch_ndims > len(base_distribution.batch_shape):
             raise ValueError(
                 "Expected reinterpreted_batch_ndims <= len(base_distribution.batch_shape), "
-                f"actual {reinterpreted_batch_ndims} vs {len(base_distribution.batch_shape)}"
+                "actual {} vs {}".format(
+                    reinterpreted_batch_ndims, len(base_distribution.batch_shape)
+                )
             )
         shape = base_distribution.batch_shape + base_distribution.event_shape
         event_dim = reinterpreted_batch_ndims + len(base_distribution.event_shape)
diff --git a/torch/fx/experimental/unification/multipledispatch/dispatcher.py b/torch/fx/experimental/unification/multipledispatch/dispatcher.py
index d2a8e6bfc7ffff..65ad70a3e1abb4 100644
--- a/torch/fx/experimental/unification/multipledispatch/dispatcher.py
+++ b/torch/fx/experimental/unification/multipledispatch/dispatcher.py
@@ -205,9 +205,9 @@ def add(self, signature, func):
             if not isinstance(typ, (type, list)):
                 str_sig = ', '.join(c.__name__ if isinstance(c, type)
                                     else str(c) for c in signature)
-                raise TypeError(f"Tried to dispatch on non-type: {typ}\n"
-                                f"In signature: <{str_sig}>\n"
-                                f"In function: {self.name}")
+                raise TypeError("Tried to dispatch on non-type: {}\n"
+                                "In signature: <{}>\n"
+                                "In function: {}".format(typ, str_sig, self.name))

             # handle variadic signatures
             if isinstance(typ, list):
@@ -272,7 +272,8 @@ def __call__(self, *args, **kwargs):

             raise NotImplementedError(
                 "Matching functions for "
-                f"{self.name}: <{str_signature(types)}> found, but none completed successfully",) from e
+                "{}: <{}> found, but none completed successfully".format(
+                    self.name, str_signature(types),),) from e

     def __str__(self):
         return f"<dispatched {self.name}>"
diff --git a/torch/jit/_recursive.py b/torch/jit/_recursive.py
index c7d10750377120..aa4e370e3b25d1 100644
--- a/torch/jit/_recursive.py
+++ b/torch/jit/_recursive.py
@@ -310,17 +310,17 @@ def infer_type(name, item):
                 )

             warnings.warn(
-                f"'{name}' was found in ScriptModule constants, "
-                f" but it is a non-constant {hint}. Consider removing it."
+                "'{}' was found in ScriptModule constants, "
+                " but it is a non-constant {}. Consider removing it.".format(name, hint)
             )
             continue
         if not hasattr(nn_module, name):
             # TODO: We should really error in this case, but its bc-breaking so
             # we need to warn for at least one release
             warnings.warn(
-                f"'{name}' was found in ScriptModule constants, "
+                "'{}' was found in ScriptModule constants, "
                 "but was not actually set in __init__. "
-                "Consider removing it."
+                "Consider removing it.".format(name)
             )
             continue
         value = getattr(nn_module, name)
@@ -370,8 +370,8 @@ def infer_type(name, item):
             hint = (
                 "(This function exists as an attribute on the Python module, "
                 "but we failed to compile it to a TorchScript function. "
-                f"\nThe error stack is reproduced here:\n{e}"
-            )
+                "\nThe error stack is reproduced here:\n{}"
+            ).format(e)
             concrete_type_builder.add_failed_attribute(name, hint)
             pass

@@ -998,9 +998,9 @@ def try_compile_fn(fn, loc):

     if not inspect.isfunction(fn) and not inspect.ismethod(fn):
         raise RuntimeError(
-            f"`{fn}` is not a function. Recursive scripting only supports "
+            "`{}` is not a function. Recursive scripting only supports "
             "Python functions or methods currently.\n"
-            f"Consider manually annotating `{fn}` with @torch.jit.script."
+            "Consider manually annotating `{}` with @torch.jit.script.".format(fn, fn)
         )

     # We don't have the actual scope where the function was defined, but we can
diff --git a/torch/jit/_script.py b/torch/jit/_script.py
index acedc52786f709..bcfe27b9663c1d 100644
--- a/torch/jit/_script.py
+++ b/torch/jit/_script.py
@@ -257,7 +257,7 @@ def __setitem__(self, k, v):
             else:
                 raise RuntimeError(
                     "Cannot re-assign modules in a ScriptModule with non-scripted "
-                    f"module, tried to replace existing module '{k}': {v}"
+                    "module, tried to replace existing module '{}': {}".format(k, v)
                 )

         def __getitem__(self, k):
@@ -1402,7 +1402,7 @@ def _check_overload_defaults(impl_defaults, overload_defaults, loc):
                 loc,
                 "Default parameters on overloads do not affect the runtime so they "
                 "must equal to the default parameter on the implementation function. Found on "
-                f"parameter {name}",
+                "parameter {name}".format(name=name),
             )
@@ -1461,9 +1461,9 @@ def _check_directly_compile_overloaded(obj):
     qual_name = _qualified_name(obj)
     if _jit_internal._get_fn_overloads(qual_name) or _try_get_jit_cached_overloads(obj):
         raise RuntimeError(
-            f"Function {qual_name} cannot be directly compiled because it"
+            "Function {} cannot be directly compiled because it"
             " is overloaded. It must be used in a context of a function"
-            " where its inputs can determine which overload to call."
+            " where its inputs can determine which overload to call.".format(qual_name)
         )
diff --git a/torch/jit/_trace.py b/torch/jit/_trace.py
index 2eaa79cfe0bcfc..11e8de464aebc8 100644
--- a/torch/jit/_trace.py
+++ b/torch/jit/_trace.py
@@ -254,8 +254,10 @@ def run_fwd_bwd(args, force_trace=False, assert_compiled=False):
             out = (out,)
         if loss_fn == torch.sum and len(out) != 1:
             raise ValueError(
-                f"Model returns {len(out)} outputs, but default loss function "
-                "(torch.sum) can only handle a single output"
+                (
+                    "Model returns {} outputs, but default loss function "
+                    "(torch.sum) can only handle a single output"
+                ).format(len(out))
             )
         out_vars, _ = _flatten(out)
         saved_outs = [
diff --git a/torch/nn/functional.py b/torch/nn/functional.py
index 9abf844acce309..0aa729d92124aa 100644
--- a/torch/nn/functional.py
+++ b/torch/nn/functional.py
@@ -903,7 +903,9 @@ def _unpool_output_size(
     if len(output_size) != len(kernel_size):
         raise ValueError(
             "output_size should be a sequence containing "
-            f"{len(kernel_size)} or {len(kernel_size) + 2} elements, but it has a length of '{len(output_size)}'"
+            "{} or {} elements, but it has a length of '{}'".format(
+                len(kernel_size), len(kernel_size) + 2, len(output_size)
+            )
         )
     for d in range(len(kernel_size)):
         min_size = default_size[d] - stride[d]
@@ -2354,8 +2356,8 @@ def embedding_bag(

     if per_sample_weights is not None and input.size() != per_sample_weights.size():
         raise ValueError(
-            f"embedding_bag: If per_sample_weights ({per_sample_weights.shape}) is not None, "
-            f"then it must have the same shape as the input ({input.shape})"
+            "embedding_bag: If per_sample_weights ({}) is not None, "
+            "then it must have the same shape as the input ({})".format(per_sample_weights.shape, input.shape)
         )

     if not weight.dim() == 2:
@@ -2373,7 +2375,7 @@ def embedding_bag(
                 "if input is 2D, then offsets has to be None"
                 ", as input is treated is a mini-batch of"
                 " fixed length sequences. However, found "
-                f"offsets of type {type_str}"
+                "offsets of type {}".format(type_str)
             )
         offsets = torch.arange(0, input.numel(), input.size(1), dtype=input.dtype, device=input.device)

@@ -2414,7 +2416,7 @@ def embedding_bag(
         raise NotImplementedError(
             "embedding_bag: per_sample_weights was not None. "
             "per_sample_weights is only supported for mode='sum' "
-            f"(got mode='{mode}'). Please open a feature request on GitHub."
+            "(got mode='{}'). Please open a feature request on GitHub.".format(mode)
         )

     ret, _, _, _ = torch.embedding_bag(
@@ -3221,9 +3223,9 @@ def smooth_l1_loss(
         )
     if not (target.size() == input.size()):
         warnings.warn(
-            f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
+            "Using a target size ({}) that is different to the input size ({}). "
             "This will likely lead to incorrect results due to broadcasting. "
-            "Please ensure they have the same size.",
+            "Please ensure they have the same size.".format(target.size(), input.size()),
             stacklevel=2,
         )
     if size_average is not None or reduce is not None:
@@ -3258,9 +3260,9 @@ def huber_loss(
             delta=delta,
         )
     if not (target.size() == input.size()):
-        warnings.warn(f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
+        warnings.warn("Using a target size ({}) that is different to the input size ({}). "
                       "This will likely lead to incorrect results due to broadcasting. "
-                      "Please ensure they have the same size.",
+                      "Please ensure they have the same size.".format(target.size(), input.size()),
                       stacklevel=2)

     expanded_input, expanded_target = torch.broadcast_tensors(input, target)
@@ -3286,9 +3288,9 @@ def l1_loss(
         )
     if not (target.size() == input.size()):
         warnings.warn(
-            f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
+            "Using a target size ({}) that is different to the input size ({}). "
             "This will likely lead to incorrect results due to broadcasting. "
-            "Please ensure they have the same size.",
+            "Please ensure they have the same size.".format(target.size(), input.size()),
             stacklevel=2,
         )
     if size_average is not None or reduce is not None:
@@ -3317,9 +3319,9 @@ def mse_loss(
         )
     if not (target.size() == input.size()):
         warnings.warn(
-            f"Using a target size ({target.size()}) that is different to the input size ({input.size()}). "
+            "Using a target size ({}) that is different to the input size ({}). "
             "This will likely lead to incorrect results due to broadcasting. "
-            "Please ensure they have the same size.",
+            "Please ensure they have the same size.".format(target.size(), input.size()),
             stacklevel=2,
         )
     if size_average is not None or reduce is not None:
@@ -4042,8 +4044,8 @@ def interpolate(input: Tensor, size: Optional[int] = None, scale_factor: Optiona

     raise NotImplementedError(
         "Input Error: Only 3D, 4D and 5D input Tensors supported"
-        f" (got {input.dim()}D) for the modes: nearest | linear | bilinear | bicubic | trilinear | area | nearest-exact"
-        f" (got {mode})"
+        " (got {}D) for the modes: nearest | linear | bilinear | bicubic | trilinear | area | nearest-exact"
+        " (got {})".format(input.dim(), mode)
     )
@@ -4275,7 +4277,7 @@ def grid_sample(
         raise ValueError(
             "nn.functional.grid_sample(): expected padding_mode "
             "to be 'zeros', 'border', or 'reflection', "
-            f"but got: '{padding_mode}'"
+            "but got: '{}'".format(padding_mode)
         )

     if mode == "bilinear":
@@ -4383,7 +4385,7 @@ def affine_grid(theta: Tensor, size: List[int], align_corners: Optional[bool] =
         raise NotImplementedError(
             "affine_grid only supports 4D and 5D sizes, "
             "for 2D and 3D affine transforms, respectively. "
-            f"Got size {size}."
+            "Got size {}.".format(size)
         )
     # check for empty span
     if align_corners and min(spatial_size) == 1:
diff --git a/torch/nn/init.py b/torch/nn/init.py
index 31e78442796b92..21768a65a9304f 100644
--- a/torch/nn/init.py
+++ b/torch/nn/init.py
@@ -543,13 +543,14 @@ def deprecated_init(*args, **kwargs):
                       f"nn.init.{old_name} is now deprecated in favor of nn.init.{new_name}.",
                       stacklevel=2)
         return meth(*args, **kwargs)

-    deprecated_init.__doc__ = fr"""
+    deprecated_init.__doc__ = r"""
    {old_name}(...)

    .. warning::
        This method is now deprecated in favor of :func:`torch.nn.init.{new_name}`.

-    See :func:`~torch.nn.init.{new_name}` for details."""
+    See :func:`~torch.nn.init.{new_name}` for details.""".format(
+        old_name=old_name, new_name=new_name)
     deprecated_init.__name__ = old_name
     return deprecated_init
diff --git a/torch/nn/modules/adaptive.py b/torch/nn/modules/adaptive.py
index dd5539bec33020..cf2f56efa8178d 100644
--- a/torch/nn/modules/adaptive.py
+++ b/torch/nn/modules/adaptive.py
@@ -224,9 +224,11 @@ def forward(self, input_: Tensor, target_: Tensor) -> _ASMoutput:
             used_rows += row_indices.numel()

         if used_rows != batch_size:
-            raise RuntimeError(f"Target values should be in [0, {self.n_classes - 1}], "
-                               f"but values in range [{target.min().item()}, {target.max().item()}] "
-                               "were found. ")
+            raise RuntimeError("Target values should be in [0, {}], "
+                               "but values in range [{}, {}] "
+                               "were found. ".format(self.n_classes - 1,
+                                                     target.min().item(),
+                                                     target.max().item()))

         head_output = self.head(input)
         head_logprob = log_softmax(head_output, dim=1)
diff --git a/torch/nn/modules/container.py b/torch/nn/modules/container.py
index 2b8cd1bab3b267..bbd253530e2c43 100644
--- a/torch/nn/modules/container.py
+++ b/torch/nn/modules/container.py
@@ -148,7 +148,8 @@ def __add__(self, other) -> 'Sequential':
             return ret
         else:
             raise ValueError('add operator supports only objects '
-                             f'of Sequential class, but {str(type(other))} is given.')
+                             'of Sequential class, but {} is given.'.format(
+                                 str(type(other))))

     def pop(self, key: Union[int, slice]) -> Module:
         v = self[key]
@@ -163,7 +164,8 @@ def __iadd__(self, other) -> 'Sequential':
             return self
         else:
             raise ValueError('add operator supports only objects '
-                             f'of Sequential class, but {str(type(other))} is given.')
+                             'of Sequential class, but {} is given.'.format(
+                                 str(type(other))))

     def __mul__(self, other: int) -> 'Sequential':
         if not isinstance(other, int):
diff --git a/torch/nn/modules/conv.py b/torch/nn/modules/conv.py
index cb018d98dc20ef..9fb1135eb9d6ab 100644
--- a/torch/nn/modules/conv.py
+++ b/torch/nn/modules/conv.py
@@ -656,9 +656,10 @@ def _output_padding(self, input: Tensor, output_size: Optional[List[int]],
                 min_size = min_sizes[i]
                 max_size = max_sizes[i]
                 if size < min_size or size > max_size:
-                    raise ValueError(
-                        f"requested an output size of {output_size}, but valid sizes range "
-                        f"from {min_sizes} to {max_sizes} (for an input of {input.size()[2:]})")
+                    raise ValueError((
+                        "requested an output size of {}, but valid sizes range "
+                        "from {} to {} (for an input of {})").format(
+                            output_size, min_sizes, max_sizes, input.size()[2:]))

             res = torch.jit.annotate(List[int], [])
             for d in range(num_spatial_dims):
diff --git a/torch/nn/modules/module.py b/torch/nn/modules/module.py
index 8fad740545008e..a0255076c5613a 100644
--- a/torch/nn/modules/module.py
+++ b/torch/nn/modules/module.py
@@ -461,8 +461,8 @@ def __init__(self, *args, **kwargs) -> None:
                             "".format(type(self).__name__, next(iter(kwargs))))

         if self.call_super_init is False and bool(args):
-            raise TypeError(f"{type(self).__name__}.__init__() takes 1 positional argument but {len(args) + 1} were"
-                            " given")
+            raise TypeError("{}.__init__() takes 1 positional argument but {} were"
+                            " given".format(type(self).__name__, len(args) + 1))

         """
         Calls super().__setattr__('a', a) instead of the typical self.a = a
@@ -537,9 +537,9 @@ def register_buffer(self, name: str, tensor: Optional[Tensor], persistent: bool
         elif hasattr(self, name) and name not in self._buffers:
             raise KeyError(f"attribute '{name}' already exists")
         elif tensor is not None and not isinstance(tensor, torch.Tensor):
-            raise TypeError(f"cannot assign '{torch.typename(tensor)}' object to buffer '{name}' "
+            raise TypeError("cannot assign '{}' object to buffer '{}' "
                             "(torch Tensor or None required)"
-                            )
+                            .format(torch.typename(tensor), name))
         else:
             for hook in _global_buffer_registration_hooks.values():
                 output = hook(self, name, tensor)
@@ -580,15 +580,15 @@ def register_parameter(self, name: str, param: Optional[Parameter]) -> None:
         if param is None:
             self._parameters[name] = None
         elif not isinstance(param, Parameter):
-            raise TypeError(f"cannot assign '{torch.typename(param)}' object to parameter '{name}' "
+            raise TypeError("cannot assign '{}' object to parameter '{}' "
                             "(torch.nn.Parameter or None 
required)" - ) + .format(torch.typename(param), name)) elif param.grad_fn: raise ValueError( - f"Cannot assign non-leaf Tensor to parameter '{name}'. Model " - f"parameters must be created explicitly. To express '{name}' " + "Cannot assign non-leaf Tensor to parameter '{0}'. Model " + "parameters must be created explicitly. To express '{0}' " "as a function of another Tensor, compute the value in " - "the forward() method.") + "the forward() method.".format(name)) else: for hook in _global_parameter_registration_hooks.values(): output = hook(self, name, param) @@ -1143,7 +1143,7 @@ def to(self, *args, **kwargs): if dtype is not None: if not (dtype.is_floating_point or dtype.is_complex): raise TypeError('nn.Module.to only accepts floating point or complex ' - f'dtypes, but got desired dtype={dtype}') + 'dtypes, but got desired dtype={}'.format(dtype)) if dtype.is_complex: warnings.warn( "Complex modules are a new feature under active development whose design may change, " @@ -1712,9 +1712,9 @@ def remove_from(*dicts_or_sets): self.register_parameter(name, value) elif params is not None and name in params: if value is not None: - raise TypeError(f"cannot assign '{torch.typename(value)}' as parameter '{name}' " + raise TypeError("cannot assign '{}' as parameter '{}' " "(torch.nn.Parameter or None expected)" - ) + .format(torch.typename(value), name)) self.register_parameter(name, value) else: modules = self.__dict__.get('_modules') @@ -1730,9 +1730,9 @@ def remove_from(*dicts_or_sets): modules[name] = value elif modules is not None and name in modules: if value is not None: - raise TypeError(f"cannot assign '{torch.typename(value)}' as child module '{name}' " + raise TypeError("cannot assign '{}' as child module '{}' " "(torch.nn.Module or None expected)" - ) + .format(torch.typename(value), name)) for hook in _global_module_registration_hooks.values(): output = hook(self, name, value) if output is not None: @@ -1742,9 +1742,9 @@ def remove_from(*dicts_or_sets): buffers = self.__dict__.get('_buffers') if buffers is not None and name in buffers: if value is not None and not isinstance(value, torch.Tensor): - raise TypeError(f"cannot assign '{torch.typename(value)}' as buffer '{name}' " + raise TypeError("cannot assign '{}' as buffer '{}' " "(torch.Tensor or None expected)" - ) + .format(torch.typename(value), name)) for hook in _global_buffer_registration_hooks.values(): output = hook(self, name, value) if output is not None: @@ -2000,10 +2000,10 @@ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, if key in state_dict: input_param = state_dict[key] if not torch.overrides.is_tensor_like(input_param): - error_msgs.append(f'While copying the parameter named "{key}", ' + error_msgs.append('While copying the parameter named "{}", ' 'expected torch.Tensor or Tensor-like object from checkpoint but ' - f'received {type(input_param)}' - ) + 'received {}' + .format(key, type(input_param))) continue # This is used to avoid copying uninitialized parameters into @@ -2039,11 +2039,11 @@ def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, else: param.copy_(input_param) except Exception as ex: - error_msgs.append(f'While copying the parameter named "{key}", ' - f'whose dimensions in the model are {param.size()} and ' - f'whose dimensions in the checkpoint are {input_param.size()}, ' - f'an exception occurred : {ex.args}.' 
- ) + error_msgs.append('While copying the parameter named "{}", ' + 'whose dimensions in the model are {} and ' + 'whose dimensions in the checkpoint are {}, ' + 'an exception occurred : {}.' + .format(key, param.size(), input_param.size(), ex.args)) elif strict: missing_keys.append(key) diff --git a/torch/nn/modules/rnn.py b/torch/nn/modules/rnn.py index b49d804fb45ae6..1ea44a6e4d7379 100644 --- a/torch/nn/modules/rnn.py +++ b/torch/nn/modules/rnn.py @@ -70,8 +70,8 @@ def __init__(self, mode: str, input_size: int, hidden_size: int, if dropout > 0 and num_layers == 1: warnings.warn("dropout option adds dropout after all but last " "recurrent layer, so non-zero dropout expects " - f"num_layers greater than 1, but got dropout={dropout} and " - f"num_layers={num_layers}") + "num_layers greater than 1, but got dropout={} and " + "num_layers={}".format(dropout, num_layers)) if not isinstance(hidden_size, int): raise TypeError(f"hidden_size should be of type int, got: {type(hidden_size).__name__}") diff --git a/torch/nn/parallel/data_parallel.py b/torch/nn/parallel/data_parallel.py index 2551d43fdc9e1e..40253c4b43c218 100644 --- a/torch/nn/parallel/data_parallel.py +++ b/torch/nn/parallel/data_parallel.py @@ -169,8 +169,8 @@ def forward(self, *inputs: Any, **kwargs: Any) -> Any: for t in chain(self.module.parameters(), self.module.buffers()): if t.device != self.src_device_obj: raise RuntimeError("module must have its parameters and buffers " - f"on device {self.src_device_obj} (device_ids[0]) but found one of " - f"them on device: {t.device}") + "on device {} (device_ids[0]) but found one of " + "them on device: {}".format(self.src_device_obj, t.device)) inputs, module_kwargs = self.scatter(inputs, kwargs, self.device_ids) # for forward function without any inputs, empty list and dict will be created @@ -249,8 +249,8 @@ def data_parallel( for t in chain(module.parameters(), module.buffers()): if t.device != src_device_obj: raise RuntimeError("module must have its parameters and buffers " - f"on device {src_device_obj} (device_ids[0]) but found one of " - f"them on device: {t.device}") + "on device {} (device_ids[0]) but found one of " + "them on device: {}".format(src_device_obj, t.device)) inputs, module_kwargs = scatter_kwargs(inputs, module_kwargs, device_ids, dim) # for module without any inputs, empty list and dict will be created diff --git a/torch/nn/parallel/distributed.py b/torch/nn/parallel/distributed.py index 24f63e18a29ee0..d72346f2376019 100644 --- a/torch/nn/parallel/distributed.py +++ b/torch/nn/parallel/distributed.py @@ -697,7 +697,9 @@ def __init__( self._log_and_throw( ValueError, "DistributedDataParallel's input module must be on " - f"the same type of devices, but input module parameters locate in {distinct_device_types}.", + "the same type of devices, but input module parameters locate in {}.".format( + distinct_device_types + ), ) self.device_type = list(distinct_device_types)[0] diff --git a/torch/nn/parallel/parallel_apply.py b/torch/nn/parallel/parallel_apply.py index 36de8845e56b49..fc14a968620dca 100644 --- a/torch/nn/parallel/parallel_apply.py +++ b/torch/nn/parallel/parallel_apply.py @@ -71,8 +71,8 @@ def _worker( if t is None: with lock: results[i] = ExceptionWrapper( - where=f"in replica {i}, no device was provided and no tensor input was found; " - "device cannot be resolved") + where="in replica {}, no device was provided and no tensor input was found; " + "device cannot be resolved".format(i)) return device = t.get_device() if stream is None: diff --git 
a/torch/nn/parameter.py b/torch/nn/parameter.py index c8716b6c20d3b2..c15ad0c863c94b 100644 --- a/torch/nn/parameter.py +++ b/torch/nn/parameter.py @@ -154,11 +154,11 @@ def __torch_function__(cls, func, types, args=(), kwargs=None): kwargs = {} return super().__torch_function__(func, types, args, kwargs) raise ValueError( - f'Attempted to use an uninitialized parameter in {func}. ' + 'Attempted to use an uninitialized parameter in {}. ' 'This error happens when you are using a `LazyModule` or ' - f'explicitly manipulating `torch.nn.parameter.{cls.__name__}` ' + 'explicitly manipulating `torch.nn.parameter.{}` ' 'objects. When using LazyModules Call `forward` with a dummy batch ' - 'to initialize the parameters before calling torch functions') + 'to initialize the parameters before calling torch functions'.format(func, cls.__name__)) def is_lazy(param): diff --git a/torch/nn/utils/parametrizations.py b/torch/nn/utils/parametrizations.py index 40dc763269be33..c451be6dd7920c 100644 --- a/torch/nn/utils/parametrizations.py +++ b/torch/nn/utils/parametrizations.py @@ -379,7 +379,7 @@ def __init__( if n_power_iterations <= 0: raise ValueError('Expected n_power_iterations to be positive, but ' - f'got n_power_iterations={n_power_iterations}') + 'got n_power_iterations={}'.format(n_power_iterations)) self.dim = dim if dim >= 0 else dim + ndim self.eps = eps if ndim > 1: diff --git a/torch/nn/utils/prune.py b/torch/nn/utils/prune.py index 15e244afaf1da8..1e16f11b6b626e 100644 --- a/torch/nn/utils/prune.py +++ b/torch/nn/utils/prune.py @@ -288,7 +288,9 @@ def add_pruning_method(self, method): elif method is not None and self._tensor_name != method._tensor_name: raise ValueError( "Can only add pruning methods acting on " - f"the parameter named '{self._tensor_name}' to PruningContainer {self}." + "the parameter named '{}' to PruningContainer {}.".format( + self._tensor_name, self + ) + f" Found '{method._tensor_name}'" ) # if all checks passed, add to _pruning_methods tuple @@ -1090,7 +1092,9 @@ def global_unstructured(parameters, pruning_method, importance_scores=None, **kw if method.PRUNING_TYPE != "unstructured": raise TypeError( 'Only "unstructured" PRUNING_TYPE supported for ' - f"the `pruning_method`. Found method {pruning_method} of type {method.PRUNING_TYPE}" + "the `pruning_method`. Found method {} of type {}".format( + pruning_method, method.PRUNING_TYPE + ) ) container.add_pruning_method(method) @@ -1276,7 +1280,7 @@ def _validate_structured_pruning(t): raise ValueError( "Structured pruning can only be applied to " "multidimensional tensors. 
Found tensor of shape " - f"{shape} with {len(shape)} dims" + "{} with {} dims".format(shape, len(shape)) ) diff --git a/torch/nn/utils/rnn.py b/torch/nn/utils/rnn.py index fd7a12d933df1c..4f84cfb9d5a9a3 100644 --- a/torch/nn/utils/rnn.py +++ b/torch/nn/utils/rnn.py @@ -327,8 +327,8 @@ def pad_packed_sequence( if total_length < max_seq_length: raise ValueError("Expected total_length to be at least the length " "of the longest sequence in input, but got " - f"total_length={total_length} and max sequence length being {max_seq_length}" - ) + "total_length={} and max sequence length being {}" + .format(total_length, max_seq_length)) max_seq_length = total_length padded_output, lengths = _VF._pad_packed_sequence( sequence.data, sequence.batch_sizes, batch_first, padding_value, max_seq_length) diff --git a/torch/nn/utils/spectral_norm.py b/torch/nn/utils/spectral_norm.py index 0dd5973abb6783..ea29d095ea1c0f 100644 --- a/torch/nn/utils/spectral_norm.py +++ b/torch/nn/utils/spectral_norm.py @@ -29,7 +29,7 @@ def __init__(self, name: str = 'weight', n_power_iterations: int = 1, dim: int = self.dim = dim if n_power_iterations <= 0: raise ValueError('Expected n_power_iterations to be positive, but ' - f'got n_power_iterations={n_power_iterations}') + 'got n_power_iterations={}'.format(n_power_iterations)) self.n_power_iterations = n_power_iterations self.eps = eps diff --git a/torch/optim/lr_scheduler.py b/torch/optim/lr_scheduler.py index 337537ebe5a514..d78fdbf18580b1 100644 --- a/torch/optim/lr_scheduler.py +++ b/torch/optim/lr_scheduler.py @@ -40,7 +40,7 @@ def __init__(self, optimizer, last_epoch=-1, verbose=False): for i, group in enumerate(optimizer.param_groups): if 'initial_lr' not in group: raise KeyError("param 'initial_lr' is not specified " - f"in param_groups[{i}] when resuming an optimizer") + "in param_groups[{}] when resuming an optimizer".format(i)) self.base_lrs = [group['initial_lr'] for group in optimizer.param_groups] self.last_epoch = last_epoch @@ -645,8 +645,8 @@ def __init__(self, optimizer, schedulers, milestones, last_epoch=-1, verbose=Fal if (len(milestones) != len(schedulers) - 1): raise ValueError( "Sequential Schedulers expects number of schedulers provided to be one more " - f"than the number of milestone points, but got number of schedulers {len(schedulers)} and the " - f"number of milestones to be equal to {len(milestones)}" + "than the number of milestone points, but got number of schedulers {} and the " + "number of milestones to be equal to {}".format(len(schedulers), len(milestones)) ) self._schedulers = schedulers self._milestones = milestones @@ -862,7 +862,7 @@ def __init__(self, schedulers): if (schedulers[scheduler_idx].optimizer != schedulers[0].optimizer): raise ValueError( "ChainedScheduler expects all schedulers to belong to the same optimizer, but " - f"got schedulers at index {0} and {scheduler_idx} to be different" + "got schedulers at index {} and {} to be different".format(0, scheduler_idx) ) self._schedulers = list(schedulers) self.optimizer = schedulers[0].optimizer diff --git a/torch/overrides.py b/torch/overrides.py index 1fd26d5312ad8e..80d4a8844a75e1 100644 --- a/torch/overrides.py +++ b/torch/overrides.py @@ -1577,9 +1577,9 @@ def handle_torch_function( func_name = f'{public_api.__module__}.{public_api.__name__}' msg = ( - f"no implementation found for '{func_name}' on types that implement " - f'__torch_function__: {[type(arg) for arg in overloaded_args]}' - ) + "no implementation found for '{}' on types that implement " + 
'__torch_function__: {}' + ).format(func_name, [type(arg) for arg in overloaded_args]) if _is_torch_function_mode_enabled(): msg += f" nor in mode {_get_current_function_mode()}" raise TypeError(msg) diff --git a/torch/quasirandom.py b/torch/quasirandom.py index 1c9b949c55651c..c5086da283a4d2 100644 --- a/torch/quasirandom.py +++ b/torch/quasirandom.py @@ -122,11 +122,11 @@ def draw_base2(self, m: int, out: Optional[torch.Tensor] = None, total_n = self.num_generated + n if not (total_n & (total_n - 1) == 0): raise ValueError("The balance properties of Sobol' points require " - f"n to be a power of 2. {self.num_generated} points have been " - f"previously generated, then: n={self.num_generated}+2**{m}={total_n}. " + "n to be a power of 2. {0} points have been " + "previously generated, then: n={0}+2**{1}={2}. " "If you still want to do this, please use " "'SobolEngine.draw()' instead." - ) + .format(self.num_generated, m, total_n)) return self.draw(n=n, out=out, dtype=dtype) def reset(self): diff --git a/torch/serialization.py b/torch/serialization.py index aeb1d8444612ba..875c6abe030a81 100644 --- a/torch/serialization.py +++ b/torch/serialization.py @@ -202,8 +202,10 @@ def check_module_version_greater_or_equal(module, req_version_tuple, error_if_ma except Exception as e: message = ( - f"'{module.__name__}' module version string is malformed '{module.__version__}' and cannot be compared" - f" with tuple {str(req_version_tuple)}" + "'{}' module version string is malformed '{}' and cannot be compared" + " with tuple {}" + ).format( + module.__name__, module.__version__, str(req_version_tuple) ) if error_if_malformed: raise RuntimeError(message) from e diff --git a/torch/testing/_internal/common_device_type.py b/torch/testing/_internal/common_device_type.py index c1d19c02b43bf1..7f045b0e658b31 100644 --- a/torch/testing/_internal/common_device_type.py +++ b/torch/testing/_internal/common_device_type.py @@ -1195,7 +1195,7 @@ def __init__(self, *args, device_type="all"): assert isinstance(arg, (list, tuple)), \ "When one dtype variant is a tuple or list, " \ "all dtype variants must be. 
" \ - f"Received non-list non-tuple dtype {str(arg)}" + "Received non-list non-tuple dtype {}".format(str(arg)) assert all(isinstance(dtype, torch.dtype) for dtype in arg), f"Unknown dtype in {str(arg)}" else: assert all(isinstance(arg, torch.dtype) for arg in args), f"Unknown dtype in {str(args)}" diff --git a/torch/testing/_internal/common_utils.py b/torch/testing/_internal/common_utils.py index 1e18ca2afec56e..8d2a5e1ac8525f 100644 --- a/torch/testing/_internal/common_utils.py +++ b/torch/testing/_internal/common_utils.py @@ -506,8 +506,9 @@ def test_wrapper(*args, **kwargs): values = list(values) if len(self.arg_names) > 1 else [values] if len(values) != len(self.arg_names): - raise RuntimeError(f'Expected # values == # arg names, but got: {len(values)} ' - f'values and {len(self.arg_names)} names for test "{test.__name__}"') + raise RuntimeError('Expected # values == # arg names, but got: {} ' + 'values and {} names for test "{}"'.format( + len(values), len(self.arg_names), test.__name__)) param_kwargs = dict(zip(self.arg_names, values)) @@ -3466,9 +3467,9 @@ def accept_output(update_type): return accept_output("output") else: raise RuntimeError( - f"I got this output for {munged_id}{subname_output}:\n\n{s}\n\n" - "No expect file exists; to accept the current output, run:\n" - f"python {__main__.__file__} {munged_id} --accept") from None + ("I got this output for {}{}:\n\n{}\n\n" + "No expect file exists; to accept the current output, run:\n" + "python {} {} --accept").format(munged_id, subname_output, s, __main__.__file__, munged_id)) from None # a hack for JIT tests if IS_WINDOWS: @@ -4071,9 +4072,10 @@ def check_test_defined_in_running_script(test_case): if running_script_path is None: return test_case_class_file = os.path.abspath(os.path.realpath(inspect.getfile(test_case.__class__))) - assert test_case_class_file == running_script_path, f"Class of loaded TestCase \"{test_case.id()}\" " \ - f"is not defined in the running script \"{running_script_path}\", but in \"{test_case_class_file}\". Did you " \ - "accidentally import a unittest.TestCase from another file?" + assert test_case_class_file == running_script_path, "Class of loaded TestCase \"{}\" " \ + "is not defined in the running script \"{}\", but in \"{}\". Did you " \ + "accidentally import a unittest.TestCase from another file?".format( + test_case.id(), running_script_path, test_case_class_file) def load_tests(loader, tests, pattern): set_running_script_path() diff --git a/torch/testing/_internal/hypothesis_utils.py b/torch/testing/_internal/hypothesis_utils.py index 0654a64b96b268..4ace78f7594e33 100644 --- a/torch/testing/_internal/hypothesis_utils.py +++ b/torch/testing/_internal/hypothesis_utils.py @@ -362,7 +362,7 @@ def assert_deadline_disabled(): warning_message = ( "Your version of hypothesis is outdated. " "To avoid `DeadlineExceeded` errors, please update. 
" - f"Current hypothesis version: {hypothesis.__version__}" + "Current hypothesis version: {}".format(hypothesis.__version__) ) warnings.warn(warning_message) else: diff --git a/torch/testing/_internal/jit_utils.py b/torch/testing/_internal/jit_utils.py index 447a425b9a5bfb..f96c2fb436be9d 100644 --- a/torch/testing/_internal/jit_utils.py +++ b/torch/testing/_internal/jit_utils.py @@ -66,7 +66,7 @@ def get_execution_plan(graph_executor_state): num_plans = len(execution_plans) if num_plans != 1: raise RuntimeError('This test assumes this GraphExecutor should ' - f'only have one execution plan, got: {num_plans}') + 'only have one execution plan, got: {}'.format(num_plans)) return execution_plans[0] class _AssertRaisesRegexWithHighlightContext: diff --git a/torch/utils/bundled_inputs.py b/torch/utils/bundled_inputs.py index b18857b0e716ee..df2d771f6f51d1 100644 --- a/torch/utils/bundled_inputs.py +++ b/torch/utils/bundled_inputs.py @@ -334,14 +334,17 @@ def get_all_bundled_inputs_for_{name}(self): # Add to the high level helper methods inputs_info = repr(info[function]) if info and function in info else '[]' - get_bundled_inputs_functions_and_info_template += f""" + get_bundled_inputs_functions_and_info_template += """ temp_dict : Dict[str,List[str]] = {{}} - info: List[str] = {inputs_info} + info: List[str] = {info} temp_dict['info'] = info - temp_dict['get_inputs_function_name'] = ['get_all_bundled_inputs_for_{function_name}'] - all_inputs['{function_name}'] = temp_dict - """ + temp_dict['get_inputs_function_name'] = ['get_all_bundled_inputs_for_{name}'] + all_inputs['{name}'] = temp_dict + """.format( + name=function_name, + info=inputs_info, + ) # To ensure backwards compatibility and a streamlined api for forward these wrappers are provided if function_name == 'forward': diff --git a/torch/utils/cpp_extension.py b/torch/utils/cpp_extension.py index 6600eb3cb54037..0ed858a96cb464 100644 --- a/torch/utils/cpp_extension.py +++ b/torch/utils/cpp_extension.py @@ -1611,9 +1611,11 @@ def load_inline(name, raise ValueError(f"Expected 'functions' to be a list or dict, but was {type(functions)}") for function_name, docstring in functions.items(): if with_pytorch_error_handling: - module_def.append(f'm.def("{function_name}", torch::wrap_pybind_function({function_name}), "{docstring}");') + module_def.append( + 'm.def("{0}", torch::wrap_pybind_function({0}), "{1}");' + .format(function_name, docstring)) else: - module_def.append(f'm.def("{function_name}", {function_name}, "{docstring}");') + module_def.append('m.def("{0}", {0}, "{1}");'.format(function_name, docstring)) module_def.append('}') cpp_sources += module_def diff --git a/torch/utils/data/dataloader.py b/torch/utils/data/dataloader.py index f3cd8e3d331a41..7f50905617957f 100644 --- a/torch/utils/data/dataloader.py +++ b/torch/utils/data/dataloader.py @@ -315,7 +315,7 @@ def __init__(self, dataset: Dataset[T_co], batch_size: Optional[int] = 1, # See NOTE [ Custom Samplers and IterableDataset ] raise ValueError( "DataLoader with IterableDataset: expected unspecified " - f"batch_sampler option, but got batch_sampler={batch_sampler}") + "batch_sampler option, but got batch_sampler={}".format(batch_sampler)) else: shuffle = bool(shuffle) self._dataset_kind = _DatasetKind.Map @@ -397,19 +397,19 @@ def multiprocessing_context(self, multiprocessing_context): valid_start_methods = multiprocessing.get_all_start_methods() if multiprocessing_context not in valid_start_methods: raise ValueError( - 'multiprocessing_context option ' - f'should specify a 
valid start method in {valid_start_methods!r}, but got ' - f'multiprocessing_context={multiprocessing_context!r}') + ('multiprocessing_context option ' + 'should specify a valid start method in {!r}, but got ' + 'multiprocessing_context={!r}').format(valid_start_methods, multiprocessing_context)) multiprocessing_context = multiprocessing.get_context(multiprocessing_context) if not isinstance(multiprocessing_context, python_multiprocessing.context.BaseContext): - raise TypeError('multiprocessing_context option should be a valid context ' - 'object or a string specifying the start method, but got ' - f'multiprocessing_context={multiprocessing_context}') + raise TypeError(('multiprocessing_context option should be a valid context ' + 'object or a string specifying the start method, but got ' + 'multiprocessing_context={}').format(multiprocessing_context)) else: - raise ValueError('multiprocessing_context can only be used with ' - 'multi-process loading (num_workers > 0), but got ' - f'num_workers={self.num_workers}') + raise ValueError(('multiprocessing_context can only be used with ' + 'multi-process loading (num_workers > 0), but got ' + 'num_workers={}').format(self.num_workers)) self.__multiprocessing_context = multiprocessing_context diff --git a/torch/utils/data/datapipes/_decorator.py b/torch/utils/data/datapipes/_decorator.py index b6e93776aa38ec..d2afd996f54ba8 100644 --- a/torch/utils/data/datapipes/_decorator.py +++ b/torch/utils/data/datapipes/_decorator.py @@ -70,7 +70,7 @@ def __init__(self, arg: Union[Type[IterDataPipe], Callable[[], bool]]) -> None: if isinstance(arg, Type): # type: ignore[arg-type] if not issubclass(arg, IterDataPipe): # type: ignore[arg-type] raise TypeError("Only `IterDataPipe` can be decorated with `non_deterministic`" - f", but {arg.__name__} is found") + ", but {} is found".format(arg.__name__)) self.cls = arg # type: ignore[assignment] # 2. Decorator has an argument of a function # This class should behave differently given different inputs. Use this @@ -103,13 +103,13 @@ def deterministic_wrapper_fn(self, *args, **kwargs) -> IterDataPipe: res = self.deterministic_fn(*args, **kwargs) # type: ignore[call-arg, misc] if not isinstance(res, bool): raise TypeError("deterministic_fn of `non_deterministic` decorator is required " - f"to return a boolean value, but {type(res)} is found") + "to return a boolean value, but {} is found".format(type(res))) global _determinism if _determinism and res: - raise TypeError(f"{self.cls.__name__} is non-deterministic with the inputs, but you set " # type: ignore[union-attr] + raise TypeError("{} is non-deterministic with the inputs, but you set " "'guaranteed_datapipes_determinism'. 
You can turn off determinism " "for this DataPipe if that is acceptable for your application" - ) + .format(self.cls.__name__)) # type: ignore[union-attr] return self.cls(*args, **kwargs) # type: ignore[call-arg, misc] @@ -130,9 +130,9 @@ def wrapper(*args, **kwargs): if not isinstance(value, IterDataPipe): raise TypeError(f"Expected argument '{argument_name}' as a IterDataPipe, but found {type(value)}") if not value.type.issubtype(hint.type): - raise TypeError(f"Expected type of argument '{argument_name}' as a subtype of " - f"hint {hint.type}, but found {value.type}" - ) + raise TypeError("Expected type of argument '{}' as a subtype of " + "hint {}, but found {}" + .format(argument_name, hint.type, value.type)) return f(*args, **kwargs) diff --git a/torch/utils/data/sampler.py b/torch/utils/data/sampler.py index 1ba1dfc665efac..1e56df5adc8a43 100644 --- a/torch/utils/data/sampler.py +++ b/torch/utils/data/sampler.py @@ -223,7 +223,7 @@ def __init__(self, weights: Sequence[float], num_samples: int, weights_tensor = torch.as_tensor(weights, dtype=torch.double) if len(weights_tensor.shape) != 1: raise ValueError("weights should be a 1d sequence but given " - f"weights have shape {tuple(weights_tensor.shape)}") + "weights have shape {}".format(tuple(weights_tensor.shape))) self.weights = weights_tensor self.num_samples = num_samples diff --git a/torch/utils/hipify/hipify_python.py b/torch/utils/hipify/hipify_python.py index 4d7bf7233cfccc..34361d09221c5e 100755 --- a/torch/utils/hipify/hipify_python.py +++ b/torch/utils/hipify/hipify_python.py @@ -502,7 +502,7 @@ def hip_header_magic(input_string): # Check if one of the following headers is already included. headers = ["hip/hip_runtime.h", "hip/hip_runtime_api.h"] - if any(re.search(fr'#include ("{ext}"|<{ext}>)', output_string) for ext in headers): + if any(re.search(r'#include ("{0}"|<{0}>)'.format(ext), output_string) for ext in headers): return output_string # Rough logic to detect if we're inside device code diff --git a/torch/utils/hooks.py b/torch/utils/hooks.py index e0ab3618242af1..1cc83911d32de1 100644 --- a/torch/utils/hooks.py +++ b/torch/utils/hooks.py @@ -83,10 +83,10 @@ def warn_if_has_hooks(tensor): for k in tensor._backward_hooks: hook = tensor._backward_hooks[k] if not hasattr(k, "__torch_unserializable__"): - warnings.warn(f"backward hook {repr(hook)} on tensor will not be " + warnings.warn("backward hook {} on tensor will not be " "serialized. 
If this is expected, you can " "decorate the function with @torch.utils.hooks.unserializable_hook " - "to suppress this warning") + "to suppress this warning".format(repr(hook))) class BackwardHook: """ @@ -140,7 +140,7 @@ def hook(grad_input, _): if len(out) != len(res): raise RuntimeError("Backward hook returned an invalid number of grad_input, " - f"got {len(out)}, but expected {len(res)}") + "got {}, but expected {}".format(len(out), len(res))) res = out @@ -209,7 +209,7 @@ def hook(_, grad_output): actual_len = len(hook_grad_outputs) if actual_len != expected_len: raise RuntimeError("Backward pre hook returned an invalid number of grad_output, " - f"got {actual_len}, but expected {expected_len}") + "got {}, but expected {}".format(actual_len, expected_len)) self.grad_outputs = hook_grad_outputs # Special case if no input required gradients, this hook should call the user diff --git a/torch/utils/mobile_optimizer.py b/torch/utils/mobile_optimizer.py index b89169a80a93e1..fae6efe265f988 100644 --- a/torch/utils/mobile_optimizer.py +++ b/torch/utils/mobile_optimizer.py @@ -95,9 +95,9 @@ def generate_mobile_module_lints(script_module: torch.jit.ScriptModule): for name, param in script_module.named_parameters(): if param.requires_grad: - lint_list.append({"name": LintCode.REQUIRES_GRAD.name, "message": f"Param {name} requires grad, " + lint_list.append({"name": LintCode.REQUIRES_GRAD.name, "message": "Param {} requires grad, " "please set torch.no_grad() to reduce memory usage and improve computation speed during " - "inference phase."}) + "inference phase.".format(name)}) op_names = torch.jit.export_opnames(script_module) for op_name in op_names:
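
The rewrite pattern applied throughout this diff, shown below as a minimal standalone sketch (the variable names are illustrative only and are not taken from the patch): an f-string is replaced by adjacent string literals that Python concatenates at compile time, with a single str.format() call filling every placeholder in order.

# illustrative example, not part of the patch
name = "weight"
hint = "function"

# f-string form (the '-' lines):
msg_fstring = (
    f"'{name}' was found in ScriptModule constants, "
    f"but it is a non-constant {hint}. Consider removing it."
)

# str.format() form (the '+' lines). The adjacent literals are joined into one
# string before .format() runs, so one call fills all placeholders left to right:
msg_format = (
    "'{}' was found in ScriptModule constants, "
    "but it is a non-constant {}. Consider removing it.".format(name, hint)
)

assert msg_fstring == msg_format

# Numbered placeholders let one argument be reused ({0}), and doubled braces
# produce a literal brace, which several converted templates above rely on:
print("all_inputs['{0}'] = temp_dict  # literal braces: {{}}".format("forward"))
# -> all_inputs['forward'] = temp_dict  # literal braces: {}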