Skip to content

Commit

Permalink
cleanup
Browse files Browse the repository at this point in the history
  • Loading branch information
naoyam committed Jan 14, 2025
1 parent e808200 commit f481637
Show file tree
Hide file tree
Showing 4 changed files with 0 additions and 60 deletions.
44 changes: 0 additions & 44 deletions csrc/device_lower/analysis/predicate_elimination.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@
#include <device_lower/utils.h>
#include <disjoint_set.h>
#include <instrumentation.h>
#include <ir/internal_nodes.h>
#include <ir/iostream.h>
#include <ir/utils.h>
#include <ops/arith.h>
Expand Down Expand Up @@ -1197,47 +1196,4 @@ std::string PredicateElimination::toString() const {
return ss.str();
}

// Scans the fusion for PadOp expressions and, for each one:
//   (1) asserts that every root->logical transformation of the pad output
//       is a Resize op, and
//   (2) walks from each fusion input feeding the pad output to the indexing
//       path of that input's consumers.
// NOTE(review): the traversal results (all_vals members, the indexing path)
// are computed but never stored on the object, and the id_model parameter is
// unused — this looks like in-progress scaffolding; confirm intent before
// relying on it.
PadPredicateInfo::PadPredicateInfo(
Fusion* fusion,
const IdModel& id_model,
const TensorIndexer& tensor_indexer) {
// Visit every expression in the fusion; only PadOp nodes are of interest.
for (auto expr : fusion->exprs()) {
auto pad = dynamic_cast<PadOp*>(expr);
if (pad == nullptr) {
continue;
}
// auto p_tv = pad->in()->as<TensorView>();
// c_tv is the padded output tensor of this PadOp.
auto c_tv = pad->out()->as<TensorView>();

// All vals on any dependency chain from the fusion inputs to the pad
// output; used below to locate the input TensorViews feeding this pad.
auto all_vals = DependencyCheck::getAllValsBetween(
{fusion->inputs().begin(), fusion->inputs().end()}, {c_tv});

// The pad output's root->logical exprs must all be Resize ops — anything
// else violates the assumption this analysis is built on.
auto pad_resize_exprs = DependencyCheck::getAllExprsBetween(
{c_tv->getRootDomain().begin(), c_tv->getRootDomain().end()},
{c_tv->getLogicalDomain().begin(), c_tv->getLogicalDomain().end()});
NVF_ERROR(std::all_of(
pad_resize_exprs.begin(), pad_resize_exprs.end(), [](Expr* expr) {
return expr->isA<Resize>();
}));

// Check if the pad exprs are propagated to the inputs
for (auto inp_tv : all_vals) {
// Only fusion-input TensorViews are considered.
if (!inp_tv->isA<TensorView>() || !inp_tv->isFusionInput()) {
continue;
}

// For each tensor op consuming this fusion input, query the predicate
// indexing path of its first output.
for (auto inp_use : inp_tv->uses()) {
if (!ir_utils::isTvOp(inp_use)) {
continue;
}

auto inp_consumer_tv = inp_use->output(0)->as<TensorView>();

// NOTE(review): the path is discarded — presumably information derived
// from it was meant to be recorded on this object; confirm.
auto predicate_indexing_path = tensor_indexer.getIndexingPath(
inp_consumer_tv->definition(), inp_consumer_tv->getLogicalDomain());
}
}
}
}

} // namespace nvfuser
10 changes: 0 additions & 10 deletions csrc/device_lower/analysis/predicate_elimination.h
Original file line number Diff line number Diff line change
Expand Up @@ -8,8 +8,6 @@
#pragma once
#include <exceptions.h>

#include <id_model/id_model.h>
#include <id_model/indexing.h>
#include <ir/all_nodes.h>
#include <kernel_ir.h>

Expand Down Expand Up @@ -70,12 +68,4 @@ class PredicateElimination : public IterVisitor {
std::unordered_map<TensorView*, Val*> init_value_map_;
};

// Analysis helper constructed during lowering that inspects PadOp
// expressions in a fusion (see the definition in
// device_lower/analysis/predicate_elimination.cpp for the traversal).
// NOTE(review): the class currently exposes no accessors and holds no
// state beyond construction — presumably a work-in-progress interface.
class PadPredicateInfo {
public:
PadPredicateInfo(
Fusion* fusion,
const IdModel& id_model,
const TensorIndexer& tensor_indexer);
};

} // namespace nvfuser
5 changes: 0 additions & 5 deletions csrc/device_lower/lower2device.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -598,11 +598,6 @@ void GpuLower::analysis(Fusion* fusion) {

consumerToTMAInfo() = getConsumerToTMAInfoMap(fusion_);
dumpExprsIfEnabled(fusion_->exprs(), "getConsumerToTMAInfoMap");

if (hasIdModel()) {
pad_predcate_info_ = std::make_unique<PadPredicateInfo>(
fusion_, *id_model_, *tensor_indexer_);
}
}

kir::Kernel* GpuLower::kernel() const {
Expand Down
1 change: 0 additions & 1 deletion csrc/device_lower/lower2device.h
Original file line number Diff line number Diff line change
Expand Up @@ -350,7 +350,6 @@ class GpuLower : public NonCopyable {
std::unique_ptr<IdModel> id_model_;
std::unique_ptr<TensorIndexer> tensor_indexer_;
std::unordered_map<TensorView*, const TMAInfo> consumer_to_tma_info_;
std::unique_ptr<PadPredicateInfo> pad_predcate_info_;

// Track which tensor views are inputs or outputs of a vectorized operation
// and their maximum vectorized access size
Expand Down

0 comments on commit f481637

Please sign in to comment.