Skip to content

Commit

Permalink
Update to match upstream API change (NFC).
Browse files Browse the repository at this point in the history
This method was renamed upstream, but a forwarding (staging) function was kept for compatibility; switch all call sites to the renamed variant.

PiperOrigin-RevId: 715208433
  • Loading branch information
jpienaar authored and Google-ML-Automation committed Jan 14, 2025
1 parent 2eea949 commit dd006b8
Show file tree
Hide file tree
Showing 10 changed files with 22 additions and 24 deletions.
4 changes: 2 additions & 2 deletions xla/backends/gpu/codegen/transforms/convert_float_nvidia.cc
Original file line number Diff line number Diff line change
Expand Up @@ -240,8 +240,8 @@ class ConvertFloatNvidiaPass
void runOnOperation() override {
mlir::RewritePatternSet patterns(&getContext());
patterns.add<RewriteTruncFPattern, RewriteExtFPattern>(&getContext());
if (mlir::failed(mlir::applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
if (mlir::failed(
mlir::applyPatternsGreedily(getOperation(), std::move(patterns)))) {
signalPassFailure();
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -44,8 +44,8 @@ class ConvertPureCallOpsPass
auto* ctx = &getContext();
mlir::RewritePatternSet patterns(ctx);
patterns.add<RewriteCall>(ctx);
if (mlir::failed(mlir::applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
if (mlir::failed(
mlir::applyPatternsGreedily(getOperation(), std::move(patterns)))) {
signalPassFailure();
}
}
Expand Down
4 changes: 2 additions & 2 deletions xla/backends/gpu/codegen/transforms/expand_float_ops.cc
Original file line number Diff line number Diff line change
Expand Up @@ -648,8 +648,8 @@ class ExpandFloatOpsPass
RewriteFpToIPattern<ma::FPToUIOp>>(&getContext());
mlir::populatePolynomialApproximateTanhPattern(patterns);
patterns.add<RewriteErf32Pattern>(&getContext());
if (mlir::failed(mlir::applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
if (mlir::failed(
mlir::applyPatternsGreedily(getOperation(), std::move(patterns)))) {
signalPassFailure();
}
}
Expand Down
2 changes: 1 addition & 1 deletion xla/backends/gpu/codegen/transforms/flatten_tensors.cc
Original file line number Diff line number Diff line change
Expand Up @@ -700,7 +700,7 @@ class FlattenTensorsPass
// clang-format on
ApplyIndexingOp::getCanonicalizationPatterns(patterns, mlir_context);
if (mlir::failed(
mlir::applyPatternsAndFoldGreedily(module, std::move(patterns)))) {
mlir::applyPatternsGreedily(module, std::move(patterns)))) {
signalPassFailure();
return;
}
Expand Down
8 changes: 4 additions & 4 deletions xla/backends/gpu/codegen/transforms/lower_xla_gpu_to_scf.cc
Original file line number Diff line number Diff line change
Expand Up @@ -388,8 +388,8 @@ class LowerXlaGpuToScfPass
patterns.add<RewritePredicatedInsert, RewritePredicatedExtract,
RewriteShuffleReduce, RewriteMaterialize, RewriteInsert>(
ctx, options_);
if (mlir::failed(mlir::applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
if (mlir::failed(
mlir::applyPatternsGreedily(getOperation(), std::move(patterns)))) {
signalPassFailure();
}
}
Expand All @@ -405,8 +405,8 @@ class LowerXlaGpuLoopsToScfPass
auto* ctx = &getContext();
mlir::RewritePatternSet patterns(ctx);
patterns.add<RewriteXlaGpuLoop>(ctx);
if (mlir::failed(mlir::applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
if (mlir::failed(
mlir::applyPatternsGreedily(getOperation(), std::move(patterns)))) {
signalPassFailure();
}
}
Expand Down
8 changes: 4 additions & 4 deletions xla/backends/gpu/codegen/transforms/optimize_loops.cc
Original file line number Diff line number Diff line change
Expand Up @@ -312,8 +312,8 @@ class OptimizeLoopsPass
// First unroll loops. If unrolling is possible, we prefer it.
mlir::RewritePatternSet unroll_patterns(&getContext());
unroll_patterns.add<UnrollLoops>(&getContext());
if (mlir::failed(mlir::applyPatternsAndFoldGreedily(
getOperation(), std::move(unroll_patterns)))) {
if (mlir::failed(mlir::applyPatternsGreedily(getOperation(),
std::move(unroll_patterns)))) {
signalPassFailure();
return;
}
Expand All @@ -322,8 +322,8 @@ class OptimizeLoopsPass
mlir::RewritePatternSet patterns(&getContext());
patterns.add<PipelineLoad<mlir::vector::TransferReadOp>,
PipelineLoad<mlir::tensor::ExtractOp>>(&getContext());
if (mlir::failed(mlir::applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
if (mlir::failed(
mlir::applyPatternsGreedily(getOperation(), std::move(patterns)))) {
signalPassFailure();
}
}
Expand Down
3 changes: 1 addition & 2 deletions xla/backends/gpu/codegen/transforms/peel_loops.cc
Original file line number Diff line number Diff line change
Expand Up @@ -133,8 +133,7 @@ struct PeelLoopsPass : public impl::PeelLoopsPassBase<PeelLoopsPass> {
mlir::MLIRContext* mlir_context = &getContext();
mlir::RewritePatternSet patterns(mlir_context);
patterns.add<PeelLoop>(mlir_context);
if (mlir::failed(
mlir::applyPatternsAndFoldGreedily(func, std::move(patterns)))) {
if (mlir::failed(mlir::applyPatternsGreedily(func, std::move(patterns)))) {
signalPassFailure();
return;
}
Expand Down
2 changes: 1 addition & 1 deletion xla/backends/gpu/codegen/transforms/simplify_affine.cc
Original file line number Diff line number Diff line change
Expand Up @@ -305,7 +305,7 @@ struct SimplifyAffinePass
mlir::GreedyRewriteConfig config;
// There's no point simplifying more than once.
config.strictMode = mlir::GreedyRewriteStrictness::ExistingOps;
if (mlir::failed(mlir::applyPatternsAndFoldGreedily(
if (mlir::failed(mlir::applyPatternsGreedily(
getOperation(), std::move(patterns), config))) {
signalPassFailure();
}
Expand Down
7 changes: 3 additions & 4 deletions xla/backends/gpu/codegen/transforms/simplify_arith.cc
Original file line number Diff line number Diff line change
Expand Up @@ -363,16 +363,15 @@ class SimplifyArithPass
RewriteTruncExtShuffle
>(ctx);
// clang-format on
if (mlir::failed(
mlir::applyPatternsAndFoldGreedily(func, std::move(patterns)))) {
if (mlir::failed(mlir::applyPatternsGreedily(func, std::move(patterns)))) {
signalPassFailure();
}

mlir::RewritePatternSet scf_patterns(ctx);
mlir::scf::ForOp::getCanonicalizationPatterns(scf_patterns, ctx);
mlir::scf::IfOp::getCanonicalizationPatterns(scf_patterns, ctx);
if (mlir::failed(mlir::applyPatternsAndFoldGreedily(
func, std::move(scf_patterns)))) {
if (mlir::failed(
mlir::applyPatternsGreedily(func, std::move(scf_patterns)))) {
signalPassFailure();
}
}
Expand Down
4 changes: 2 additions & 2 deletions xla/backends/gpu/codegen/transforms/unswitch_loops.cc
Original file line number Diff line number Diff line change
Expand Up @@ -89,8 +89,8 @@ void UnswitchLoopsPass::runOnOperation() {
patterns.add<UnswitchLoop>(&getContext());
mlir::scf::ForOp::getCanonicalizationPatterns(patterns, &getContext());
mlir::scf::IfOp::getCanonicalizationPatterns(patterns, &getContext());
if (mlir::failed(mlir::applyPatternsAndFoldGreedily(getOperation(),
std::move(patterns)))) {
if (mlir::failed(
mlir::applyPatternsGreedily(getOperation(), std::move(patterns)))) {
signalPassFailure();
}
}
Expand Down

0 comments on commit dd006b8

Please sign in to comment.