From b13fd97addf84b5edc587f20e2b8b6553b7d0d68 Mon Sep 17 00:00:00 2001
From: Alexis <35051714+amontoison@users.noreply.github.com>
Date: Thu, 7 Feb 2019 08:46:04 -0500
Subject: [PATCH] Use opEye() as the default preconditioner
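
opEye() from LinearOperators 0.5.2 is a dimension-free lazy identity:
its product with a vector returns the vector itself, so the solvers no
longer need the system size to build a default preconditioner and no
identity operator is allocated. Each solver that keeps a vector and its
preconditioned copy in sync now tests isa(M, opEye) and skips the
redundant rescaling when no preconditioner was given: with the lazy
identity, u = M * Mu aliases Mu, so rescaling both would rescale the
same array twice. For the same reason, cr now copies M * b before
updating the residual in place.

A minimal sketch of the aliasing argument; rmul! stands in for the
@kscal! kernel used in the sources:

    using LinearOperators, LinearAlgebra

    M  = opEye()            # lazy identity, no fixed size
    Mu = rand(10)
    u  = M * Mu             # returns Mu itself: no copy, no allocation
    @assert u === Mu

    β = norm(u)
    MisI = isa(M, opEye)
    rmul!(u, 1/β)           # u and Mu alias, so one rescale suffices...
    MisI || rmul!(Mu, 1/β)  # ...the second runs only for a real M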

---
 REQUIRE            |  2 +-
 src/cg.jl          |  2 +-
 src/cgls.jl        |  2 +-
 src/cgne.jl        |  2 +-
 src/cgs.jl         |  2 +-
 src/cr.jl          |  4 ++--
 src/crls.jl        |  2 +-
 src/crmr.jl        |  2 +-
 src/diom.jl        |  2 +-
 src/dqgmres.jl     |  2 +-
 src/lslq.jl        | 16 ++++++++++------
 src/lsmr.jl        | 18 +++++++++++-------
 src/lsqr.jl        | 16 ++++++++++------
 src/minres.jl      |  2 +-
 src/symmlq.jl      | 13 ++++++++-----
 test/test_alloc.jl | 38 +++++++++++++++++---------------------
 test/test_utils.jl | 19 -------------------
 17 files changed, 68 insertions(+), 76 deletions(-)
 delete mode 100644 test/test_utils.jl
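
The deleted test/test_utils.jl helpers are superseded by LinearOperators
0.5.2: opEye() replaces nonallocating_opEye, and PreallocatedLinearOperator
replaces preallocated_LinearOperator. A quick sketch of the replacement
(sizes are illustrative):

    using LinearOperators

    L = rand(8, 8)
    A = PreallocatedLinearOperator(L)  # wraps L; products reuse buffers
    v = rand(8)
    A * v                              # same result as L * v, fewer allocations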

diff --git a/REQUIRE b/REQUIRE
index 80b8866f3..2050f23e2 100644
--- a/REQUIRE
+++ b/REQUIRE
@@ -1,2 +1,2 @@
 julia 0.7 2.0
-LinearOperators 0.5.1
+LinearOperators 0.5.2
diff --git a/src/cg.jl b/src/cg.jl
index c806b04e8..ec77fa2a5 100644
--- a/src/cg.jl
+++ b/src/cg.jl
@@ -19,7 +19,7 @@ A preconditioner M may be provided in the form of a linear operator and is
 assumed to be symmetric and positive definite.
 """
 function cg(A :: AbstractLinearOperator, b :: AbstractVector{T};
-            M :: AbstractLinearOperator=opEye(size(A,1)), atol :: Float64=1.0e-8,
+            M :: AbstractLinearOperator=opEye(), atol :: Float64=1.0e-8,
             rtol :: Float64=1.0e-6, itmax :: Int=0, radius :: Float64=0.0,
             verbose :: Bool=false) where T <: Number
 
diff --git a/src/cgls.jl b/src/cgls.jl
index 04cc02538..9a15522ae 100644
--- a/src/cgls.jl
+++ b/src/cgls.jl
@@ -38,7 +38,7 @@ It is formally equivalent to LSQR; it can be slightly less accurate,
 but is simpler to implement.
 """
 function cgls(A :: AbstractLinearOperator, b :: AbstractVector{T};
-              M :: AbstractLinearOperator=opEye(size(b,1)), λ :: Float64=0.0,
+              M :: AbstractLinearOperator=opEye(), λ :: Float64=0.0,
               atol :: Float64=1.0e-8, rtol :: Float64=1.0e-6, radius :: Float64=0.0,
               itmax :: Int=0, verbose :: Bool=false) where T <: Number
 
diff --git a/src/cgne.jl b/src/cgne.jl
index 161800cfa..d26c7fc6f 100644
--- a/src/cgne.jl
+++ b/src/cgne.jl
@@ -54,7 +54,7 @@ but simpler to implement. Only the x-part of the solution is returned.
 A preconditioner M may be provided in the form of a linear operator.
 """
 function cgne(A :: AbstractLinearOperator, b :: AbstractVector{T};
-              M :: AbstractLinearOperator=opEye(size(A,1)), λ :: Float64=0.0,
+              M :: AbstractLinearOperator=opEye(), λ :: Float64=0.0,
               atol :: Float64=1.0e-8, rtol :: Float64=1.0e-6, itmax :: Int=0,
               verbose :: Bool=false) where T <: Number
 
diff --git a/src/cgs.jl b/src/cgs.jl
index 6c9571f2f..33d04cdb8 100644
--- a/src/cgs.jl
+++ b/src/cgs.jl
@@ -33,7 +33,7 @@ TFQMR and BICGSTAB were developed to remedy this difficulty.»
 This implementation allows a right preconditioner M.
 """
 function cgs(A :: AbstractLinearOperator, b :: AbstractVector{T};
-             M :: AbstractLinearOperator=opEye(size(A,1)),
+             M :: AbstractLinearOperator=opEye(),
              atol :: Float64=1.0e-8, rtol :: Float64=1.0e-6,
              itmax :: Int=0, verbose :: Bool=false) where {T <: Number}
 
diff --git a/src/cr.jl b/src/cr.jl
index 6ecdb2c21..93e959b3b 100644
--- a/src/cr.jl
+++ b/src/cr.jl
@@ -15,7 +15,7 @@ assumed to be symmetric and positive definite.
 In a linesearch context, 'linesearch' must be set to 'true'.
 """
 function cr(A :: AbstractLinearOperator, b :: AbstractVector{T};
-            M :: AbstractLinearOperator=opEye(size(A,1)), atol :: Float64=1.0e-8,
+            M :: AbstractLinearOperator=opEye(), atol :: Float64=1.0e-8,
             rtol :: Float64=1.0e-6, γ :: Float64=1.0e-6, itmax :: Int=0,
             radius :: Float64=0.0, verbose :: Bool=false, linesearch :: Bool=false) where T <: Number
 
@@ -29,7 +29,7 @@ function cr(A :: AbstractLinearOperator, b :: AbstractVector{T};
   # Initial state.
   x = zeros(T, n) # initial estimation x = 0
   xNorm = 0.0
-  r = M * b # initial residual r = M * (b - Ax) = M * b
+  r = copy(M * b) # initial residual r = M * (b - Ax) = M * b
   Ar = A * r
   ρ = @kdot(n, r, Ar)
   ρ == 0.0 && return (x, SimpleStats(true, false, [0.0], [], "x = 0 is a zero-residual solution"))
diff --git a/src/crls.jl b/src/crls.jl
index 065d774eb..18367cf66 100644
--- a/src/crls.jl
+++ b/src/crls.jl
@@ -37,7 +37,7 @@ It is formally equivalent to LSMR; it can be slightly less accurate,
 but is simpler to implement.
 """
 function crls(A :: AbstractLinearOperator, b :: AbstractVector{T};
-              M :: AbstractLinearOperator=opEye(size(b,1)), λ :: Float64=0.0,
+              M :: AbstractLinearOperator=opEye(), λ :: Float64=0.0,
               atol :: Float64=1.0e-8, rtol :: Float64=1.0e-6, radius :: Float64=0.0,
               itmax :: Int=0, verbose :: Bool=false) where T <: Number
 
diff --git a/src/crmr.jl b/src/crmr.jl
index e99f6ab99..8e7dd3059 100644
--- a/src/crmr.jl
+++ b/src/crmr.jl
@@ -53,7 +53,7 @@ but simpler to implement. Only the x-part of the solution is returned.
 A preconditioner M may be provided in the form of a linear operator.
 """
 function crmr(A :: AbstractLinearOperator, b :: AbstractVector{T};
-              M :: AbstractLinearOperator=opEye(size(A,1)), λ :: Float64=0.0,
+              M :: AbstractLinearOperator=opEye(), λ :: Float64=0.0,
               atol :: Float64=1.0e-8, rtol :: Float64=1.0e-6,
               itmax :: Int=0, verbose :: Bool=false) where T <: Number
 
diff --git a/src/diom.jl b/src/diom.jl
index e63f1a868..9c2e8a223 100644
--- a/src/diom.jl
+++ b/src/diom.jl
@@ -23,7 +23,7 @@ and indefinite systems of linear equations can be handled by this single algorithm.
 This implementation allows a right preconditioner M.
 """
 function diom(A :: AbstractLinearOperator, b :: AbstractVector{T};
-              M :: AbstractLinearOperator=opEye(size(A,1)),
+              M :: AbstractLinearOperator=opEye(),
               atol :: Float64=1.0e-8, rtol :: Float64=1.0e-6,
               itmax :: Int=0, memory :: Int=20, verbose :: Bool=false) where T <: Number
 
diff --git a/src/dqgmres.jl b/src/dqgmres.jl
index feaef724a..2a7ca87f2 100644
--- a/src/dqgmres.jl
+++ b/src/dqgmres.jl
@@ -21,7 +21,7 @@ and computes a sequence of approximate solutions with the quasi-minimal residual property.
 This implementation allows a right preconditioner M.
 """
 function dqgmres(A :: AbstractLinearOperator, b :: AbstractVector{T};
-                 M :: AbstractLinearOperator=opEye(size(A,1)), atol :: Float64=1.0e-8,
+                 M :: AbstractLinearOperator=opEye(), atol :: Float64=1.0e-8,
                  rtol :: Float64=1.0e-6, itmax :: Int=0, memory :: Int=20,
                  verbose :: Bool=false) where T <: Number
 
diff --git a/src/lslq.jl b/src/lslq.jl
index c875e0dfa..984acb9da 100644
--- a/src/lslq.jl
+++ b/src/lslq.jl
@@ -93,8 +93,8 @@ The iterations stop as soon as one of the following conditions holds true:
 * R. Estrin, D. Orban and M. A. Saunders, *LSLQ: An Iterative Method for Linear Least-Squares with an Error Minimization Property*, Cahier du GERAD G-2017-xx, GERAD, Montreal, 2017.
 """
 function lslq(A :: AbstractLinearOperator, b :: AbstractVector{T};
-              M :: AbstractLinearOperator=opEye(size(A,1)),
-              N :: AbstractLinearOperator=opEye(size(A,2)),
+              M :: AbstractLinearOperator=opEye(),
+              N :: AbstractLinearOperator=opEye(),
               sqd :: Bool=false, λ :: Float64=0.0, σ :: Float64=0.0,
               atol :: Float64=1.0e-8, btol :: Float64=1.0e-8, etol :: Float64=1.0e-8,
               window :: Int=5, utol :: Float64=1.0e-8, itmax :: Int=0,
@@ -104,6 +104,10 @@ function lslq(A :: AbstractLinearOperator, b :: AbstractVector{T};
   size(b, 1) == m || error("Inconsistent problem size")
   verbose && @printf("LSLQ: system of %d equations in %d variables\n", m, n)
 
+  # Tests M == Iₘ and N == Iₙ
+  MisI = isa(M, opEye)
+  NisI = isa(N, opEye)
+
   # If solving an SQD system, set regularization to 1.
   sqd && (λ = 1.0)
   λ² = λ * λ
@@ -125,7 +129,7 @@ function lslq(A :: AbstractLinearOperator, b :: AbstractVector{T};
   β = β₁
 
   @kscal!(m, 1.0/β₁, u)
-  @kscal!(m, 1.0/β₁, Mu)
+  MisI || @kscal!(m, 1.0/β₁, Mu)
   Nv = copy(A' * u)
   v = N * Nv
   α = sqrt(@kdot(n, v, Nv))  # = α₁
@@ -134,7 +138,7 @@ function lslq(A :: AbstractLinearOperator, b :: AbstractVector{T};
   α == 0.0 && return (x_lq, x_cg, err_lbnds, err_ubnds_lq, err_ubnds_cg,
                       SimpleStats(true, false, [β₁], [0.0], "x = 0 is a minimum least-squares solution"))
   @kscal!(n, 1.0/α, v)
-  @kscal!(n, 1.0/α, Nv)
+  NisI || @kscal!(n, 1.0/α, Nv)
 
   Anorm = α
   Anorm² = α * α
@@ -200,7 +204,7 @@ function lslq(A :: AbstractLinearOperator, b :: AbstractVector{T};
     β = sqrt(@kdot(m, u, Mu))
     if β != 0.0
       @kscal!(m, 1.0/β, u)
-      @kscal!(m, 1.0/β, Mu)
+      MisI || @kscal!(m, 1.0/β, Mu)
 
       # 2. αv = A'u - βv
       @kscal!(n, -β, Nv)
@@ -209,7 +213,7 @@ function lslq(A :: AbstractLinearOperator, b :: AbstractVector{T};
       α = sqrt(@kdot(n, v, Nv))
       if α != 0.0
         @kscal!(n, 1.0/α, v)
-        @kscal!(n, 1.0/α, Nv)
+        NisI || @kscal!(n, 1.0/α, Nv)
       end
 
       # rotate out regularization term if present
diff --git a/src/lsmr.jl b/src/lsmr.jl
index 7821a5e2e..c73625439 100644
--- a/src/lsmr.jl
+++ b/src/lsmr.jl
@@ -58,8 +58,8 @@ In this case, `N` can still be specified and indicates the norm
 in which `x` should be measured.
 """
 function lsmr(A :: AbstractLinearOperator, b :: AbstractVector{T};
-              M :: AbstractLinearOperator=opEye(size(A,1)),
-              N :: AbstractLinearOperator=opEye(size(A,2)),
+              M :: AbstractLinearOperator=opEye(),
+              N :: AbstractLinearOperator=opEye(),
               sqd :: Bool=false,
               λ :: Float64=0.0, axtol :: Float64=1.0e-8, btol :: Float64=1.0e-8,
               atol :: Float64=0.0, rtol :: Float64=0.0,
@@ -71,6 +71,10 @@ function lsmr(A :: AbstractLinearOperator, b :: AbstractVector{T};
   size(b, 1) == m || error("Inconsistent problem size")
   verbose && @printf("LSMR: system of %d equations in %d variables\n", m, n)
 
+  # Tests M == Iₘ and N == Iₙ
+  MisI = isa(M, opEye)
+  NisI = isa(N, opEye)
+
   # If solving an SQD system, set regularization to 1.
   sqd && (λ = 1.0)
   ctol = conlim > 0.0 ? 1/conlim : 0.0
@@ -85,7 +89,7 @@ function lsmr(A :: AbstractLinearOperator, b :: AbstractVector{T};
   β = β₁
 
   @kscal!(m, 1.0/β₁, u)
-  @kscal!(m, 1.0/β₁, Mu)
+  MisI || @kscal!(m, 1.0/β₁, Mu)
   Nv = copy(A' * u)
   v = N * Nv
   α = sqrt(@kdot(n, v, Nv))
@@ -130,7 +134,7 @@ function lsmr(A :: AbstractLinearOperator, b :: AbstractVector{T};
   # A'b = 0 so x = 0 is a minimum least-squares solution
   α == 0.0 && return (x, SimpleStats(true, false, [β₁], [0.0], "x = 0 is a minimum least-squares solution"))
   @kscal!(n, 1.0/α, v)
-  @kscal!(n, 1.0/α, Nv)
+  NisI || @kscal!(n, 1.0/α, Nv)
 
   h = copy(v)
   hbar = zeros(T, n)
@@ -151,13 +155,13 @@ function lsmr(A :: AbstractLinearOperator, b :: AbstractVector{T};
 
     # Generate next Golub-Kahan vectors.
     # 1. βu = Av - αu
-    @kscal!(m, -α, Mu,)
+    @kscal!(m, -α, Mu)
     @kaxpy!(m, 1.0, A * v, Mu)
     u = M * Mu
     β = sqrt(@kdot(m, u, Mu))
     if β != 0.0
       @kscal!(m, 1.0/β, u)
-      @kscal!(m, 1.0/β, Mu)
+      MisI || @kscal!(m, 1.0/β, Mu)
 
       # 2. αv = A'u - βv
       @kscal!(n, -β, Nv)
@@ -166,7 +170,7 @@ function lsmr(A :: AbstractLinearOperator, b :: AbstractVector{T};
       α = sqrt(@kdot(n, v, Nv))
       if α != 0.0
         @kscal!(n, 1.0/α, v)
-        @kscal!(n, 1.0/α, Nv)
+        NisI || @kscal!(n, 1.0/α, Nv)
       end
     end
 
diff --git a/src/lsqr.jl b/src/lsqr.jl
index 7674c2f2c..174dff746 100644
--- a/src/lsqr.jl
+++ b/src/lsqr.jl
@@ -58,8 +58,8 @@ In this case, `N` can still be specified and indicates the norm
 in which `x` should be measured.
 """
 function lsqr(A :: AbstractLinearOperator, b :: AbstractVector{T};
-              M :: AbstractLinearOperator=opEye(size(A,1)),
-              N :: AbstractLinearOperator=opEye(size(A,2)),
+              M :: AbstractLinearOperator=opEye(),
+              N :: AbstractLinearOperator=opEye(),
               sqd :: Bool=false,
               λ :: Float64=0.0, axtol :: Float64=1.0e-8, btol :: Float64=1.0e-8,
               atol :: Float64=0.0, rtol :: Float64=0.0,
@@ -71,6 +71,10 @@ function lsqr(A :: AbstractLinearOperator, b :: AbstractVector{T};
   size(b, 1) == m || error("Inconsistent problem size")
   verbose && @printf("LSQR: system of %d equations in %d variables\n", m, n)
 
+  # Tests M == Iₘ and N == Iₙ
+  MisI = isa(M, opEye)
+  NisI = isa(N, opEye)
+
   # If solving an SQD system, set regularization to 1.
   sqd && (λ = 1.0)
   λ² = λ * λ
@@ -86,7 +90,7 @@ function lsqr(A :: AbstractLinearOperator, b :: AbstractVector{T};
   β = β₁
 
   @kscal!(m, 1.0/β₁, u)
-  @kscal!(m, 1.0/β₁, Mu)
+  MisI || @kscal!(m, 1.0/β₁, Mu)
   Nv = copy(A' * u)
   v = N * Nv
   α = sqrt(@kdot(n, v, Nv))
@@ -113,7 +117,7 @@ function lsqr(A :: AbstractLinearOperator, b :: AbstractVector{T};
   # A'b = 0 so x = 0 is a minimum least-squares solution
   α == 0.0 && return (x, SimpleStats(true, false, [β₁], [0.0], "x = 0 is a minimum least-squares solution"))
   @kscal!(n, 1.0/α, v)
-  @kscal!(n, 1.0/α, Nv)
+  NisI || @kscal!(n, 1.0/α, Nv)
   w = copy(v)
 
   # Initialize other constants.
@@ -154,7 +158,7 @@ function lsqr(A :: AbstractLinearOperator, b :: AbstractVector{T};
     β = sqrt(@kdot(m, u, Mu))
     if β != 0.0
       @kscal!(m, 1.0/β, u)
-      @kscal!(m, 1.0/β, Mu)
+      MisI || @kscal!(m, 1.0/β, Mu)
       Anorm² = Anorm² + α * α + β * β;  # = ‖B_{k-1}‖²
       λ > 0.0 && (Anorm² += λ²)
 
@@ -165,7 +169,7 @@ function lsqr(A :: AbstractLinearOperator, b :: AbstractVector{T};
       α = sqrt(@kdot(n, v, Nv))
       if α != 0.0
         @kscal!(n, 1.0/α, v)
-        @kscal!(n, 1.0/α, Nv)
+        NisI || @kscal!(n, 1.0/α, Nv)
       end
     end
 
diff --git a/src/minres.jl b/src/minres.jl
index f1912e6c8..438f389ad 100644
--- a/src/minres.jl
+++ b/src/minres.jl
@@ -43,7 +43,7 @@ A preconditioner M may be provided in the form of a linear operator and is
 assumed to be symmetric and positive definite.
 """
 function minres(A :: AbstractLinearOperator, b :: AbstractVector{T};
-                M :: AbstractLinearOperator=opEye(size(A,1)), λ :: Float64=0.0,
+                M :: AbstractLinearOperator=opEye(), λ :: Float64=0.0,
                 atol :: Float64=1.0e-12, rtol :: Float64=1.0e-12, etol :: Float64=1.0e-8,
                 window :: Int=5, itmax :: Int=0, conlim :: Float64=1.0e+8,
                 verbose :: Bool=false) where T <: Number
diff --git a/src/symmlq.jl b/src/symmlq.jl
index ed35a214d..9300effde 100644
--- a/src/symmlq.jl
+++ b/src/symmlq.jl
@@ -25,7 +25,7 @@ A preconditioner M may be provided in the form of a linear operator and is
 assumed to be symmetric and positive definite.
 """
 function symmlq(A :: AbstractLinearOperator, b :: AbstractVector{T};
-                M :: AbstractLinearOperator=opEye(size(A,1)), λ :: Float64=0.0,
+                M :: AbstractLinearOperator=opEye(), λ :: Float64=0.0,
                 λest :: Float64=0.0, atol :: Float64=1.0e-8, rtol :: Float64=1.0e-8,
                 etol :: Float64=1.0e-8, window :: Int=0, itmax :: Int=0,
                 conlim :: Float64=1.0e+8, verbose :: Bool=false) where T <: Number
@@ -35,6 +35,9 @@ function symmlq(A :: AbstractLinearOperator, b :: AbstractVector{T};
   size(b, 1) == m || error("Inconsistent problem size")
   verbose && @printf("SYMMLQ: system of size %d\n", n)
 
+  # Tests M == Iₘ
+  MisI = isa(M, opEye)
+
   ϵM = eps(T)
   x = zeros(T, n)
   ctol = conlim > 0.0 ? 1 ./ conlim : 0.0;
@@ -47,8 +50,8 @@ function symmlq(A :: AbstractLinearOperator, b :: AbstractVector{T};
   β₁ == 0.0 && return (x, x, SimpleStats(true, true, [0.0], [0.0], "x = 0 is a zero-residual solution"))
   β₁ = sqrt(β₁)
   β = β₁
-  vold = @kscal!(m, 1 ./ β, vold)
-  p = @kscal!(m, 1 ./ β, p)
+  @kscal!(m, 1 ./ β, vold)
+  MisI || @kscal!(m, 1 ./ β, p)
 
   v = copy(A * p)
   α = @kdot(m, p, v) + λ
@@ -58,7 +61,7 @@ function symmlq(A :: AbstractLinearOperator, b :: AbstractVector{T};
   β < 0.0 && error("Preconditioner is not positive definite")
   β = sqrt(β)
   @kscal!(m, 1 ./ β, v)
-  @kscal!(m, 1 ./ β, p)
+  MisI || @kscal!(m, 1 ./ β, p)
 
   # Start QR factorization
   γbar = α
@@ -143,7 +146,7 @@ function symmlq(A :: AbstractLinearOperator, b :: AbstractVector{T};
     β < 0.0 && error("Preconditioner is not positive definite")
     β = sqrt(β)
     @kscal!(m, 1 ./ β, v_next)
-    @kscal!(m, 1 ./ β, p)
+    MisI || @kscal!(m, 1 ./ β, p)
 
     # Continue A norm estimate
     ANorm² = ANorm² + α * α + oldβ * oldβ + β * β
diff --git a/test/test_alloc.jl b/test/test_alloc.jl
index a7f2ed24c..63c7658a5 100644
--- a/test/test_alloc.jl
+++ b/test/test_alloc.jl
@@ -1,15 +1,11 @@
-include("test_utils.jl")
-
 L   = get_div_grad(32, 32, 32)
 n   = size(L, 1)
 m   = div(n, 2)
-A   = preallocated_LinearOperator(L) # Dimension n x n
-Au  = preallocated_LinearOperator(L[1:m,:]) # Dimension m x n
-Ao  = preallocated_LinearOperator(L[:,1:m]) # Dimension n x m
+A   = PreallocatedLinearOperator(L) # Dimension n x n
+Au  = PreallocatedLinearOperator(L[1:m,:]) # Dimension m x n
+Ao  = PreallocatedLinearOperator(L[:,1:m]) # Dimension n x m
 b   = ones(n)
 c   = ones(m)
-M   = nonallocating_opEye(n)
-N   = nonallocating_opEye(m)
 mem = 10
 
 # without preconditioner and with Ap preallocated, CG needs 3 n-vectors: x, r, p
@@ -17,8 +13,8 @@ storage_cg(n) = 3 * n
 storage_cg_bytes(n) = 8 * storage_cg(n)
 
 expected_cg_bytes = storage_cg_bytes(n)
-cg(A, b, M=M)  # warmup
-actual_cg_bytes = @allocated cg(A, b, M=M)
+cg(A, b)  # warmup
+actual_cg_bytes = @allocated cg(A, b)
 @test actual_cg_bytes ≤ 1.1 * expected_cg_bytes
 
 # without preconditioner and with Ap preallocated, DIOM needs:
@@ -31,8 +27,8 @@ storage_diom(mem, n) = (2 * n) + (2 * n * mem) + (mem) + (mem + 2) + (mem / 64)
 storage_diom_bytes(mem, n) = 8 * storage_diom(mem, n)
 
 expected_diom_bytes = storage_diom_bytes(mem, n)
-diom(A, b, M=M, memory=mem)  # warmup
-actual_diom_bytes = @allocated diom(A, b, M=M, memory=mem)
+diom(A, b, memory=mem)  # warmup
+actual_diom_bytes = @allocated diom(A, b, memory=mem)
 @test actual_diom_bytes ≤ 1.05 * expected_diom_bytes
 
 # with Ap preallocated, CG-Lanczos needs 4 n-vectors: x, v, v_prev, p
@@ -53,8 +49,8 @@ storage_dqgmres(mem, n) = (n) + (2 * n * mem) + (2 * mem) + (mem + 2)
 storage_dqgmres_bytes(mem, n) = 8 * storage_dqgmres(mem, n)
 
 expected_dqgmres_bytes = storage_dqgmres_bytes(mem, n)
-dqgmres(A, b, M=M, memory=mem)  # warmup
-actual_dqgmres_bytes = @allocated dqgmres(A, b, M=M, memory=mem)
+dqgmres(A, b, memory=mem)  # warmup
+actual_dqgmres_bytes = @allocated dqgmres(A, b, memory=mem)
 @test actual_dqgmres_bytes ≤ 1.05 * expected_dqgmres_bytes
 
 # without preconditioner and with Ap preallocated, CR needs 4 n-vectors: x, r, p, q
@@ -62,16 +58,16 @@ storage_cr(n) = 4 * n
 storage_cr_bytes(n) = 8 * storage_cr(n)
 
 expected_cr_bytes = storage_cr_bytes(n)
-cr(A, b, M=M)  # warmup
-actual_cr_bytes = @allocated cr(A, b, M=M)
+cr(A, b)  # warmup
+actual_cr_bytes = @allocated cr(A, b)
 @test actual_cr_bytes ≤ 1.1 * expected_cr_bytes
 
 # without preconditioner and with Ap preallocated, CGS needs 5 n-vectors: x, r, u, p, q
 storage_cgs(n) = 5 * n
 storage_cgs_bytes(n) = 8 * storage_cgs(n)
 expected_cgs_bytes = storage_cgs_bytes(n)
-cgs(A, b, M=M)  # warmup
-actual_cgs_bytes = @allocated cgs(A, b, M=M)
+cgs(A, b)  # warmup
+actual_cgs_bytes = @allocated cgs(A, b)
 @test actual_cgs_bytes ≤ 1.1 * expected_cgs_bytes
 
 # without preconditioner and with (Ap, Aᵀq) preallocated, CGNE needs:
@@ -80,8 +76,8 @@ actual_cgs_bytes = @allocated cgs(A, b, M=M)
 storage_cgne(n, m) = 2*n + m
 storage_cgne_bytes(n, m) = 8 * storage_cgne(n, m)
 expected_cgne_bytes = storage_cgne_bytes(n, m)
-(x, stats) = cgne(Au, c, M=N)  # warmup
-actual_cgne_bytes = @allocated cgne(Au, c, M=N)
+(x, stats) = cgne(Au, c)  # warmup
+actual_cgne_bytes = @allocated cgne(Au, c)
 @test actual_cgne_bytes ≤ 1.1 * expected_cgne_bytes
 
 # without preconditioner and with (Ap, Aᵀq) preallocated, CGLS needs:
@@ -90,6 +86,6 @@ actual_cgne_bytes = @allocated cgne(Au, c, M=N)
 storage_cgls(n, m) = 2*m + n
 storage_cgls_bytes(n, m) = 8 * storage_cgls(n, m)
 expected_cgls_bytes = storage_cgls_bytes(n, m)
-(x, stats) = cgls(Ao, b, M=M)  # warmup
-actual_cgls_bytes = @allocated cgls(Ao, b, M=M)
+(x, stats) = cgls(Ao, b)  # warmup
+actual_cgls_bytes = @allocated cgls(Ao, b)
 @test actual_cgls_bytes ≤ 1.1 * expected_cgls_bytes
diff --git a/test/test_utils.jl b/test/test_utils.jl
deleted file mode 100644
index e4be0a3a9..000000000
--- a/test/test_utils.jl
+++ /dev/null
@@ -1,19 +0,0 @@
-using FastClosures
-
-function preallocated_LinearOperator(A)
-  (n, m) = size(A)
-  Ap = zeros(n)
-  prod = @closure p -> mul!(Ap, A, p)
-  Atq = zeros(m)
-  tprod = @closure q -> mul!(Atq, transpose(A), q)
-  T = eltype(A)
-  F1 = typeof(prod)
-  F2 = typeof(tprod)
-  LinearOperator{T,F1,F2,F2}(n, m, false, false, prod, tprod, tprod)
-end
-
-function nonallocating_opEye(n)
-  prod = @closure v -> v
-  F = typeof(prod)
-  LinearOperator{Float64,F,F,F}(n, n, true, true, prod, prod, prod)
-end