Replace all CHECK_ and DCHECK_ with TORCH_* macros (pytorch#82032)
Avoid exposing CHECK_*/DCHECK_* defines that conflict with Google glog, since the conflict blocks external usage of libtorch in certain cases.

All the 'interesting' changes should be in these two files, and the rest should just be mechanical renames applied via sed (a minimal before/after sketch follows the file list):
c10/util/logging_is_not_google_glog.h
c10/util/logging_is_google_glog.h
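
As a minimal before/after sketch of the mechanical rename (the two call sites are taken from the diff below; only the pairing and the comments are editorial):

// Before: in the non-glog build c10 defined CHECK_*/DCHECK_* itself, so a
// downstream project that links libtorch and also includes <glog/logging.h>
// could see conflicting definitions of the same macro names.
DCHECK_EQ(X.numel(), M * N);
CHECK_NOTNULL(tensor);

// After: identical checks, under a TORCH_ prefix that cannot collide with glog.
TORCH_DCHECK_EQ(X.numel(), M * N);
TORCH_CHECK_NOTNULL(tensor);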

Fixes pytorch#81415

cc @miladm @malfet
Pull Request resolved: pytorch#82032
Approved by: https://github.com/soumith, https://github.com/miladm
wconstab authored and pytorchmergebot committed Jul 26, 2022
1 parent cab8192 commit 4f34cd6
Showing 150 changed files with 680 additions and 612 deletions.
10 changes: 5 additions & 5 deletions aten/src/ATen/native/cpu/layer_norm_kernel.cpp
@@ -35,7 +35,7 @@ void LayerNormKernelImplInternal(
Tensor* rstd) {
using T_ACC = vec::vec_scalar_t<T>;
using Vec = vec::Vectorized<T_ACC>;
DCHECK_EQ(X.numel(), M * N);
TORCH_DCHECK_EQ(X.numel(), M * N);
DCHECK(!gamma.defined() || gamma.numel() == N);
DCHECK(!beta.defined() || beta.numel() == N);
const T* X_data = X.data_ptr<T>();
@@ -117,10 +117,10 @@ void LayerNormBackwardKernelImplInternal(
Tensor* dbeta) {
using T_ACC = vec::vec_scalar_t<T>;
using Vec = vec::Vectorized<T_ACC>;
DCHECK_EQ(dY.numel(), M * N);
DCHECK_EQ(X.numel(), M * N);
DCHECK_EQ(mean.numel(), M);
DCHECK_EQ(rstd.numel(), M);
TORCH_DCHECK_EQ(dY.numel(), M * N);
TORCH_DCHECK_EQ(X.numel(), M * N);
TORCH_DCHECK_EQ(mean.numel(), M);
TORCH_DCHECK_EQ(rstd.numel(), M);
DCHECK(!gamma.defined() || gamma.numel() == N);
const T* dY_data = dY.template data_ptr<T>();
const T* X_data = X.template data_ptr<T>();
2 changes: 1 addition & 1 deletion aten/src/ATen/native/quantized/cpu/qsoftmax.cpp
@@ -94,7 +94,7 @@ Tensor qsoftmax_qnnpack(const Tensor& qx, const int64_t dim) {
TORCH_CHECK(
status == pytorch_qnnp_status_success,
"failed to create QNNPACK Softmax operator");
CHECK_NOTNULL(softargmax);
TORCH_CHECK_NOTNULL(softargmax);

status = pytorch_qnnp_setup_softargmax_nc_q8(
softargmax, batch_size, input, input_stride, output, output_stride);
4 changes: 2 additions & 2 deletions benchmarks/static_runtime/deep_wide_pt_bench.cc
@@ -47,7 +47,7 @@ static void BM_deep_wide_jit_graph_executor(benchmark::State& state) {

std::vector<IValue> inputs({ad_emb_packed, user_emb, wide});

CHECK_EQ(setenv("TORCH_JIT_DISABLE_NEW_EXECUTOR", "1", 1), 0);
TORCH_CHECK_EQ(setenv("TORCH_JIT_DISABLE_NEW_EXECUTOR", "1", 1), 0);

mod.forward(inputs);
for (auto _ : state) {
@@ -65,7 +65,7 @@ static void BM_deep_wide_jit_profiling_executor(benchmark::State& state) {

std::vector<IValue> inputs({ad_emb_packed, user_emb, wide});

CHECK_EQ(unsetenv("TORCH_JIT_DISABLE_NEW_EXECUTOR"), 0);
TORCH_CHECK_EQ(unsetenv("TORCH_JIT_DISABLE_NEW_EXECUTOR"), 0);

mod.forward(inputs);
for (auto _ : state) {
10 changes: 5 additions & 5 deletions binaries/benchmark_helper.cc
@@ -173,7 +173,7 @@ int loadInput(
LOG(INFO) << "Running on GPU.";
#ifdef __CUDA_ARCH__
caffe2::TensorCUDA* tensor = blob->GetMutable<caffe2::TensorCUDA>();
CHECK_NOTNULL(tensor);
TORCH_CHECK_NOTNULL(tensor);
tensor->Resize(input_dims);
if (input_type_list[i] == "uint8_t") {
tensor->mutable_data<uint8_t>();
@@ -189,17 +189,17 @@ int loadInput(
if (input_type_list[i] == "uint8_t") {
caffe2::int8::Int8TensorCPU* tensor =
blob->GetMutable<caffe2::int8::Int8TensorCPU>();
CHECK_NOTNULL(tensor);
TORCH_CHECK_NOTNULL(tensor);
tensor->t.Resize(input_dims);
tensor->t.mutable_data<uint8_t>();
} else if (input_type_list[i] == "float") {
caffe2::TensorCPU* tensor = BlobGetMutableTensor(blob, caffe2::CPU);
CHECK_NOTNULL(tensor);
TORCH_CHECK_NOTNULL(tensor);
tensor->Resize(input_dims);
tensor->mutable_data<float>();
} else if (input_type_list[i] == "int") {
caffe2::TensorCPU* tensor = BlobGetMutableTensor(blob, caffe2::CPU);
CHECK_NOTNULL(tensor);
TORCH_CHECK_NOTNULL(tensor);
tensor->Resize(input_dims);
tensor->mutable_data<int>();
} else {
@@ -495,7 +495,7 @@ int benchmark(
net_def.set_name("benchmark");
}
caffe2::NetBase* net = workspace->CreateNet(net_def);
CHECK_NOTNULL(net);
TORCH_CHECK_NOTNULL(net);
runNetwork(
workspace,
net,
2 changes: 1 addition & 1 deletion binaries/convert_and_benchmark.cc
@@ -591,7 +591,7 @@ void runNetwork(
}

caffe2::NetBase* net = workspace->CreateNet(net_def);
CHECK_NOTNULL(net);
TORCH_CHECK_NOTNULL(net);

LOG(INFO) << "Starting benchmark.";
caffe2::ObserverConfig::initSampleRate(1, 1, 1, run_individual, warmup);
2 changes: 1 addition & 1 deletion binaries/make_image_db.cc
@@ -251,7 +251,7 @@ void ConvertImageDataset(
// Synthesize key for this entry
auto key_len = snprintf(
key_cstr, sizeof(key_cstr), "%08d_%s", i, lines[i].first.c_str());
DCHECK_LE(key_len, sizeof(key_cstr));
TORCH_DCHECK_LE(key_len, sizeof(key_cstr));

// Put in db
transaction->Put(string(key_cstr), std::move(value));
6 changes: 3 additions & 3 deletions binaries/speed_benchmark.cc
@@ -136,12 +136,12 @@ int main(int argc, char** argv) {
if (input_type_list[i] == "uint8_t") {
caffe2::int8::Int8TensorCPU* tensor =
blob->GetMutable<caffe2::int8::Int8TensorCPU>();
CHECK_NOTNULL(tensor);
TORCH_CHECK_NOTNULL(tensor);
tensor->t.Resize(input_dims);
tensor->t.mutable_data<uint8_t>();
} else if (input_type_list[i] == "float") {
caffe2::TensorCPU* tensor = BlobGetMutableTensor(blob, caffe2::CPU);
CHECK_NOTNULL(tensor);
TORCH_CHECK_NOTNULL(tensor);
tensor->Resize(input_dims);
tensor->mutable_data<float>();
} else {
@@ -184,7 +184,7 @@ }
}

caffe2::NetBase* net = workspace->CreateNet(net_def);
CHECK_NOTNULL(net);
TORCH_CHECK_NOTNULL(net);
CAFFE_ENFORCE(net->Run());
net->TEST_Benchmark(FLAGS_warmup, FLAGS_iter, FLAGS_run_individual);

2 changes: 1 addition & 1 deletion c10/test/util/logging_test.cpp
@@ -141,7 +141,7 @@ TEST(LoggingTest, Join) {

TEST(LoggingTest, TestDanglingElse) {
if (true)
DCHECK_EQ(1, 1);
TORCH_DCHECK_EQ(1, 1);
else
GTEST_FAIL();
}
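
The TestDanglingElse case above guards against a classic macro pitfall: a check macro that expands to a bare if statement would capture an else written at the call site. A self-contained sketch of the failure mode the test is designed to catch (BAD_CHECK_EQ is an invented macro, not anything defined in c10):

#include <cstdio>
#include <cstdlib>

// Expands to an if with no else - exactly the shape TestDanglingElse rejects.
#define BAD_CHECK_EQ(a, b) if ((a) != (b)) std::abort()

int main() {
  if (true)
    BAD_CHECK_EQ(1, 1);
  else
    std::puts("the check passed, yet the else branch ran");
  // The else binds to BAD_CHECK_EQ's hidden inner if, so the message above is
  // printed even though the outer condition is true - the opposite of what the
  // call site says. TORCH_DCHECK_EQ must therefore expand to a form that
  // leaves no open if behind, which is what this test verifies.
}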
2 changes: 1 addition & 1 deletion c10/util/Logging.h
@@ -180,7 +180,7 @@ using EnforceNotMet = ::c10::Error;
* With further usages like `CAFFE_ENFORCE_THAT(IsVector(Input(0).dims()))`
*
* Convenient wrappers for binary operations like CAFFE_ENFORCE_EQ are provided
* too. Please use them instead of CHECK_EQ and friends for failures in
* too. Please use them instead of TORCH_CHECK_EQ and friends for failures in
* user-provided input.
*/

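
A hedged illustration of the guidance in that comment - CAFFE_ENFORCE_* for conditions that user input can violate, TORCH_CHECK_*/TORCH_DCHECK_* for internal invariants. The function and its contract below are invented for the example; only the two macro families come from the codebase:

#include <ATen/ATen.h>
#include <c10/util/Logging.h>

void embedding_lookup(const at::Tensor& weights, const at::Tensor& indices) {
  // User-supplied input: CAFFE_ENFORCE_EQ throws a catchable c10::Error with a
  // readable message, which is what the comment above recommends.
  CAFFE_ENFORCE_EQ(weights.dim(), 2, "weights must be a 2-D embedding table");

  // Internal invariant the library itself maintains: a failure here is a bug,
  // and the debug-only check disappears from release builds.
  TORCH_DCHECK_EQ(indices.scalar_type(), at::kLong);
}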
6 changes: 3 additions & 3 deletions c10/util/Registry.h
@@ -64,10 +64,10 @@ class Registry {
const RegistryPriority priority = REGISTRY_DEFAULT) {
std::lock_guard<std::mutex> lock(register_mutex_);
// The if statement below is essentially the same as the following line:
// CHECK_EQ(registry_.count(key), 0) << "Key " << key
// TORCH_CHECK_EQ(registry_.count(key), 0) << "Key " << key
// << " registered twice.";
// However, CHECK_EQ depends on google logging, and since registration is
// carried out at static initialization time, we do not want to have an
// However, TORCH_CHECK_EQ depends on google logging, and since registration
// is carried out at static initialization time, we do not want to have an
// explicit dependency on glog's initialization function.
if (registry_.count(key) != 0) {
auto cur_priority = priority_[key];
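
The registration comment above is about initialization order: C10_REGISTER_*-style registrations run from static initializers, which may execute before any logging backend (glog included) is set up, so the duplicate-key case is handled with plain control flow rather than a check macro. A generic, self-contained sketch of that situation (none of these names are c10's; the real Registry is templated and mutex-guarded):

#include <functional>
#include <iostream>
#include <map>
#include <string>

using Creator = std::function<int()>;

std::map<std::string, Creator>& registry() {
  static std::map<std::string, Creator> r; // constructed on first use
  return r;
}

struct RegisterAtStartup {
  RegisterAtStartup(const std::string& key, Creator c) {
    // This constructor runs before main(). A glog-backed TORCH_CHECK_EQ here
    // would require glog to be initialized already, which is not guaranteed,
    // so duplicates are reported and skipped with ordinary control flow.
    if (registry().count(key) != 0) {
      std::cerr << "Key " << key << " registered twice, keeping the first.\n";
      return;
    }
    registry().emplace(key, std::move(c));
  }
};

static RegisterAtStartup register_answer("answer", [] { return 42; });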
65 changes: 65 additions & 0 deletions c10/util/logging_is_google_glog.h
@@ -50,6 +50,71 @@ INSTANTIATE_FOR_CONTAINER(set)
#include <glog/logging.h>

// Additional macros on top of glog
#ifndef NDEBUG
#define TORCH_CHECK_EQ(val1, val2) CHECK_EQ(val1, val2)
#define TORCH_CHECK_NE(val1, val2) CHECK_NE(val1, val2)
#define TORCH_CHECK_LE(val1, val2) CHECK_LE(val1, val2)
#define TORCH_CHECK_LT(val1, val2) CHECK_LT(val1, val2)
#define TORCH_CHECK_GE(val1, val2) CHECK_GE(val1, val2)
#define TORCH_CHECK_GT(val1, val2) CHECK_GT(val1, val2)
#define TORCH_DCHECK_EQ(val1, val2) DCHECK_EQ(val1, val2)
#define TORCH_DCHECK_NE(val1, val2) DCHECK_NE(val1, val2)
#define TORCH_DCHECK_LE(val1, val2) DCHECK_LE(val1, val2)
#define TORCH_DCHECK_LT(val1, val2) DCHECK_LT(val1, val2)
#define TORCH_DCHECK_GE(val1, val2) DCHECK_GE(val1, val2)
#define TORCH_DCHECK_GT(val1, val2) DCHECK_GT(val1, val2)
#else // !NDEBUG
// These versions generate no code in optimized mode.
#define TORCH_CHECK_EQ(val1, val2) \
while (false) \
CHECK_EQ(val1, val2)
#define TORCH_CHECK_NE(val1, val2) \
while (false) \
CHECK_NE(val1, val2)
#define TORCH_CHECK_LE(val1, val2) \
while (false) \
CHECK_LE(val1, val2)
#define TORCH_CHECK_LT(val1, val2) \
while (false) \
CHECK_LT(val1, val2)
#define TORCH_CHECK_GE(val1, val2) \
while (false) \
CHECK_GE(val1, val2)
#define TORCH_CHECK_GT(val1, val2) \
while (false) \
CHECK_GT(val1, val2)
#define TORCH_DCHECK_EQ(val1, val2) \
while (false) \
DCHECK_EQ(val1, val2)
#define TORCH_DCHECK_NE(val1, val2) \
while (false) \
DCHECK_NE(val1, val2)
#define TORCH_DCHECK_LE(val1, val2) \
while (false) \
DCHECK_LE(val1, val2)
#define TORCH_DCHECK_LT(val1, val2) \
while (false) \
DCHECK_LT(val1, val2)
#define TORCH_DCHECK_GE(val1, val2) \
while (false) \
DCHECK_GE(val1, val2)
#define TORCH_DCHECK_GT(val1, val2) \
while (false) \
DCHECK_GT(val1, val2)
#endif // NDEBUG

// Check that a pointer is not null.
#define TORCH_CHECK_NOTNULL(val) CHECK_NOTNULL(val)

#ifndef NDEBUG
// Debug only version of TORCH_CHECK_NOTNULL
#define TORCH_DCHECK_NOTNULL(val) DCHECK_NOTNULL(val)
#else // !NDEBUG
// Optimized version - generates no code.
#define TORCH_DCHECK_NOTNULL(val) \
while (false) \
DCHECK_NOTNULL(val)
#endif // NDEBUG

// Log with source location information override (to be used in generic
// warning/error handlers implemented as functions, not macros)
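
The release-mode branch above uses the usual while (false) trick: each TORCH_* macro still expands to a syntactically complete statement (so a trailing << message and surrounding if/else keep compiling), but the statement is unreachable, its arguments are never evaluated at run time, and the optimizer removes it. A small self-contained sketch with invented MY_DCHECK_EQ_* macros:

#include <iostream>

// Debug flavour: evaluates the arguments and streams a report on mismatch.
#define MY_DCHECK_EQ_DEBUG(a, b) \
  if ((a) != (b))                \
  std::cerr << "Check failed: " #a " == " #b " "

// Release flavour: identical call syntax, but the leading while (false) turns
// the expansion into dead code, so the arguments are never evaluated and any
// trailing "<< message" still compiles.
#define MY_DCHECK_EQ_RELEASE(a, b) \
  while (false)                    \
  MY_DCHECK_EQ_DEBUG(a, b)

int counted() {
  static int calls = 0;
  return ++calls;
}

int main() {
  MY_DCHECK_EQ_RELEASE(counted(), 123) << "never evaluated or printed";
  MY_DCHECK_EQ_DEBUG(counted(), 123) << "printed: counted() returned 1";
  std::cout << "calls before this line: " << counted() - 1 << "\n"; // prints 1
}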
86 changes: 43 additions & 43 deletions c10/util/logging_is_not_google_glog.h
@@ -61,8 +61,8 @@ void LogMessageFatal(const char* file, int line, const T& message) {
MessageLogger(file, line, GLOG_FATAL).stream() << message;
}

// Helpers for CHECK_NOTNULL(). Two are necessary to support both raw pointers
// and smart pointers.
// Helpers for TORCH_CHECK_NOTNULL(). Two are necessary to support both raw
// pointers and smart pointers.
template <typename T>
T& CheckNotNullCommon(const char* file, int line, const char* names, T& t) {
if (t == nullptr) {
@@ -136,63 +136,63 @@ static_assert(
::c10::MessageLogger(__FILE__, __LINE__, ::c10::GLOG_##n).stream()
#endif // NDEBUG

#define CHECK_OP(val1, val2, op) \
#define TORCH_CHECK_OP(val1, val2, op) \
FATAL_IF(((val1)op(val2))) << "Check failed: " #val1 " " #op " " #val2 " (" \
<< (val1) << " vs. " << (val2) << ") "

// Check_op macro definitions
#define CHECK_EQ(val1, val2) CHECK_OP(val1, val2, ==)
#define CHECK_NE(val1, val2) CHECK_OP(val1, val2, !=)
#define CHECK_LE(val1, val2) CHECK_OP(val1, val2, <=)
#define CHECK_LT(val1, val2) CHECK_OP(val1, val2, <)
#define CHECK_GE(val1, val2) CHECK_OP(val1, val2, >=)
#define CHECK_GT(val1, val2) CHECK_OP(val1, val2, >)
// TORCH_CHECK_OP macro definitions
#define TORCH_CHECK_EQ(val1, val2) TORCH_CHECK_OP(val1, val2, ==)
#define TORCH_CHECK_NE(val1, val2) TORCH_CHECK_OP(val1, val2, !=)
#define TORCH_CHECK_LE(val1, val2) TORCH_CHECK_OP(val1, val2, <=)
#define TORCH_CHECK_LT(val1, val2) TORCH_CHECK_OP(val1, val2, <)
#define TORCH_CHECK_GE(val1, val2) TORCH_CHECK_OP(val1, val2, >=)
#define TORCH_CHECK_GT(val1, val2) TORCH_CHECK_OP(val1, val2, >)

#ifndef NDEBUG
// Debug only versions of CHECK_OP macros.
#define DCHECK_EQ(val1, val2) CHECK_OP(val1, val2, ==)
#define DCHECK_NE(val1, val2) CHECK_OP(val1, val2, !=)
#define DCHECK_LE(val1, val2) CHECK_OP(val1, val2, <=)
#define DCHECK_LT(val1, val2) CHECK_OP(val1, val2, <)
#define DCHECK_GE(val1, val2) CHECK_OP(val1, val2, >=)
#define DCHECK_GT(val1, val2) CHECK_OP(val1, val2, >)
// Debug only versions of TORCH_CHECK_OP macros.
#define TORCH_DCHECK_EQ(val1, val2) TORCH_CHECK_OP(val1, val2, ==)
#define TORCH_DCHECK_NE(val1, val2) TORCH_CHECK_OP(val1, val2, !=)
#define TORCH_DCHECK_LE(val1, val2) TORCH_CHECK_OP(val1, val2, <=)
#define TORCH_DCHECK_LT(val1, val2) TORCH_CHECK_OP(val1, val2, <)
#define TORCH_DCHECK_GE(val1, val2) TORCH_CHECK_OP(val1, val2, >=)
#define TORCH_DCHECK_GT(val1, val2) TORCH_CHECK_OP(val1, val2, >)
#else // !NDEBUG
// These versions generate no code in optimized mode.
#define DCHECK_EQ(val1, val2) \
while (false) \
CHECK_OP(val1, val2, ==)
#define DCHECK_NE(val1, val2) \
while (false) \
CHECK_OP(val1, val2, !=)
#define DCHECK_LE(val1, val2) \
while (false) \
CHECK_OP(val1, val2, <=)
#define DCHECK_LT(val1, val2) \
while (false) \
CHECK_OP(val1, val2, <)
#define DCHECK_GE(val1, val2) \
while (false) \
CHECK_OP(val1, val2, >=)
#define DCHECK_GT(val1, val2) \
while (false) \
CHECK_OP(val1, val2, >)
#define TORCH_DCHECK_EQ(val1, val2) \
while (false) \
TORCH_CHECK_OP(val1, val2, ==)
#define TORCH_DCHECK_NE(val1, val2) \
while (false) \
TORCH_CHECK_OP(val1, val2, !=)
#define TORCH_DCHECK_LE(val1, val2) \
while (false) \
TORCH_CHECK_OP(val1, val2, <=)
#define TORCH_DCHECK_LT(val1, val2) \
while (false) \
TORCH_CHECK_OP(val1, val2, <)
#define TORCH_DCHECK_GE(val1, val2) \
while (false) \
TORCH_CHECK_OP(val1, val2, >=)
#define TORCH_DCHECK_GT(val1, val2) \
while (false) \
TORCH_CHECK_OP(val1, val2, >)
#endif // NDEBUG

// Check that a pointer is not null.
#define CHECK_NOTNULL(val) \
::c10::CheckNotNull( \
#define TORCH_CHECK_NOTNULL(val) \
::c10::CheckNotNull( \
__FILE__, __LINE__, "Check failed: '" #val "' Must be non NULL", (val))

#ifndef NDEBUG
// Debug only version of CHECK_NOTNULL
#define DCHECK_NOTNULL(val) \
::c10::CheckNotNull( \
// Debug only version of TORCH_CHECK_NOTNULL
#define TORCH_DCHECK_NOTNULL(val) \
::c10::CheckNotNull( \
__FILE__, __LINE__, "Check failed: '" #val "' Must be non NULL", (val))
#else // !NDEBUG
// Optimized version - generates no code.
#define DCHECK_NOTNULL(val) \
while (false) \
CHECK_NOTNULL(val)
#define TORCH_DCHECK_NOTNULL(val) \
while (false) \
TORCH_CHECK_NOTNULL(val)
#endif // NDEBUG

// ---------------------- Support for std objects --------------------------
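
One detail worth noting in the block above: TORCH_CHECK_NOTNULL expands to a call to ::c10::CheckNotNull, which returns its argument, so the macro can sit inside a larger expression rather than only on a line of its own (the NCCL change further down uses it exactly that way, as return TORCH_CHECK_NOTNULL(contexts[key].get())). A rough standalone sketch of that shape, with invented names:

#include <cstdio>
#include <cstdlib>

// Returns the pointer unchanged, aborting first if it is null - mirroring how
// c10::CheckNotNull lets the macro be used inside expressions.
template <typename T>
T* EnsureNotNull(const char* file, int line, const char* expr, T* ptr) {
  if (ptr == nullptr) {
    std::fprintf(stderr, "%s:%d Check failed: '%s' must be non NULL\n", file, line, expr);
    std::abort();
  }
  return ptr;
}

#define MY_CHECK_NOTNULL(val) EnsureNotNull(__FILE__, __LINE__, #val, (val))

struct Widget { int size = 7; };

Widget* lookup(bool ok) {
  static Widget w;
  return ok ? &w : nullptr;
}

int main() {
  MY_CHECK_NOTNULL(lookup(true));               // usable as a statement...
  int n = MY_CHECK_NOTNULL(lookup(true))->size; // ...or inline in an expression
  std::printf("size = %d\n", n);
}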
2 changes: 1 addition & 1 deletion caffe2/contrib/fakelowp/fp16_fc_acc_op.h
@@ -78,7 +78,7 @@ class Fp16FCAccOp final : public Operator<Context> {

Y_shape_cache_ = X.sizes().vec();
// This is an invariant of canonical_axis, so we can DCHECK.
DCHECK_LE(canonical_axis + 1, Y_shape_cache_.size());
TORCH_DCHECK_LE(canonical_axis + 1, Y_shape_cache_.size());
Y_shape_cache_.resize(canonical_axis + 1);
Y_shape_cache_[canonical_axis] = N;
Y->Resize(Y_shape_cache_);
4 changes: 2 additions & 2 deletions caffe2/contrib/nccl/cuda_nccl_gpu.cc
@@ -91,7 +91,7 @@ NCCLContext* getNCCLContext(const NCCLExecution& ex) {
LOG(INFO) << "Creating NCCLContext for key: " << key;
contexts[key].reset(new NCCLContext(ex));
}
return CHECK_NOTNULL(contexts[key].get());
return TORCH_CHECK_NOTNULL(contexts[key].get());
}

template <typename T>
@@ -153,7 +153,7 @@ void runNCCL(const NCCLExecution& ex, InitF&& init_f, F&& f) {
auto& comm = comms[i];
auto& stream = streams[i];

DCHECK_EQ(ctx.device, GetGPUIDForPointer(ctx.src->raw_data()));
TORCH_DCHECK_EQ(ctx.device, GetGPUIDForPointer(ctx.src->raw_data()));
CUDA_ENFORCE(cudaStreamWaitEvent(stream, context->master_event_, 0));
f(ctx, comm, stream);
}
2 changes: 1 addition & 1 deletion caffe2/contrib/opencl/context.h
@@ -36,7 +36,7 @@ class OpenCLContext final {
public:
explicit OpenCLContext();
explicit OpenCLContext(const DeviceOption& option) {
DCHECK_EQ(option.device_type(), PROTO_OPENCL);
TORCH_DCHECK_EQ(option.device_type(), PROTO_OPENCL);
OpenCLContext();
}
~OpenCLContext() {}
2 changes: 1 addition & 1 deletion caffe2/core/common_cudnn.cc
@@ -9,7 +9,7 @@ CuDNNWrapper::PerGPUCuDNNStates& CuDNNWrapper::cudnn_states() {
// New it (never delete) to avoid calling the destructors on process
// exit and racing against the CUDA shutdown sequence.
static auto* p = new CuDNNWrapper::PerGPUCuDNNStates();
CHECK_NOTNULL(p);
TORCH_CHECK_NOTNULL(p);
return *p;
}
