
Commit

remove unused code
joostvanzwieten committed Apr 19, 2024
1 parent 4c138e1 commit a48ac1f
Showing 1 changed file with 9 additions and 150 deletions.
159 changes: 9 additions & 150 deletions nutils/evaluable.py
@@ -147,16 +147,6 @@ def __init__(self, args: typing.Tuple['Evaluable', ...]):
    def evalf(*args):
        raise NotImplementedError('Evaluable derivatives should implement the evalf method')

    @cached_property
    def dependencies(self):
        '''collection of all function arguments'''
        deps = {}
        for func in self.__args:
            funcdeps = func.dependencies
            deps.update(funcdeps)
            deps[func] = len(funcdeps)
        return types.frozendict(deps)

    @cached_property
    def arguments(self):
        'a frozenset of all arguments of this evaluable'
@@ -166,41 +156,6 @@ def arguments(self):
    def isconstant(self):
        return not self.arguments

    @cached_property
    def ordereddeps(self):
        '''collection of all function arguments such that the arguments to
        dependencies[i] can be found in dependencies[:i]'''
        deps = self.dependencies.copy()
        deps.pop(EVALARGS, None)
        return tuple([EVALARGS] + sorted(deps, key=deps.__getitem__))

    @cached_property
    def dependencytree(self):
        '''lookup table of function arguments into ordereddeps, such that
        ordereddeps[i].__args[j] == ordereddeps[dependencytree[i][j]], and
        self.__args[j] == ordereddeps[dependencytree[-1][j]]'''
        args = self.ordereddeps
        return tuple(tuple(map(args.index, func.__args)) for func in args+(self,))

    @property
    def serialized(self):
        return zip(self.ordereddeps[1:]+(self,), self.dependencytree[1:])

    # This property is a derivation of `ordereddeps[1:]` where the `Evaluable`
    # instances are mapped to the `evalf` methods of the instances. Asserting
    # that functions are immutable is difficult and currently
    # `types._isimmutable` marks all functions as mutable. Since the
    # `types.CacheMeta` machinery asserts immutability of the property, we have
    # to resort to a regular `functools.cached_property`. Nevertheless, this
    # property should be treated as if it is immutable.
    @cached_property
    def _serialized_evalf_head(self):
        return tuple(op.evalf for op in self.ordereddeps[1:])

    @property
    def _serialized_evalf(self):
        return zip(itertools.chain(self._serialized_evalf_head, (self.evalf,)), self.dependencytree[1:])
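For orientation, the removed machinery compiles the dependency graph into a straight-line program: `ordereddeps` lists every node after its arguments, `dependencytree` rewrites each node's arguments as positions in that list, and `serialized` pairs the two so that evaluation becomes a single pass over a growing value stack. A minimal self-contained sketch of the same idea, with a hypothetical `Node` class standing in for `Evaluable`:

class Node:

    def __init__(self, evalf, *args):
        self.evalf = evalf
        self.args = args


def serialize(root):
    # topological order: every node appears after all of its arguments,
    # mirroring `ordereddeps`
    order = []
    def visit(node):
        if node not in order:
            for arg in node.args:
                visit(arg)
            order.append(node)
    visit(root)
    # map each node's arguments to their positions, mirroring `dependencytree`
    index = {node: i for i, node in enumerate(order)}
    return [(node.evalf, tuple(index[arg] for arg in node.args)) for node in order]


def evaluate(serialized):
    # a single pass over a growing value stack
    values = []
    for evalf, indices in serialized:
        values.append(evalf(*[values[i] for i in indices]))
    return values[-1]


# (1 + 2) * 2 evaluated as a straight-line program
one = Node(lambda: 1)
two = Node(lambda: 2)
add = Node(lambda a, b: a + b, one, two)
mul = Node(lambda a, b: a * b, add, two)
assert evaluate(serialize(mul)) == 6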

    def _node(self, cache, subgraph, times, unique_loops):
        if self in cache:
            return cache[self]
@@ -239,34 +194,6 @@ def eval(self, **evalargs):

        return self._compiled(**evalargs)

    def _iter_stack(self):
        yield '%0 = EVALARGS'
        for i, (op, indices) in enumerate(self.serialized, start=1):
            s = [f'%{i} = {op}']
            if indices:
                args = [f'%{i}' for i in indices]
                try:
                    sig = inspect.signature(op.evalf)
                except ValueError:
                    pass
                else:
                    for i, param in enumerate(sig.parameters.values()):
                        if i < len(args) and param.kind == param.POSITIONAL_OR_KEYWORD:
                            args[i] = param.name + '=' + args[i]
                s.extend(args)
            yield ' '.join(s)

    def _format_stack(self, values, e):
        lines = [f'evaluation failed in step {len(values)}/{len(self.dependencies)+1}']
        stack = self._iter_stack()
        for v, op in zip(values, stack):  # NOTE values must come first to avoid popping next item from stack
            s = f'{type(v).__name__}'
            if numeric.isarray(v):
                s += f'<{v.dtype.kind}:{",".join(str(n) for n in v.shape)}>'
            lines.append(f'{op} --> {s}')
        lines.append(f'{next(stack)} --> {e}')
        return '\n '.join(lines)

    @util.deep_replace_property
    def simplified(obj):
        retval = obj._simplified()
@@ -367,17 +294,6 @@ def _compile_copy_to(self, builder, out, alloc_block_id):
        return NotImplemented


class EVALARGS(Evaluable):

    def __init__(self):
        super().__init__(args=())

    def _node(self, cache, subgraph, times, unique_loops):
        return InvisibleNode((type(self).__name__, _Stats()))


# replace the class by its sole instance; EVALARGS serves as the sentinel at
# position 0 of `ordereddeps` through which the arguments of `eval` enter
EVALARGS = EVALARGS()


class Tuple(Evaluable):

    def __init__(self, items):
@@ -4441,8 +4357,11 @@ def __init__(self, loop_id: _LoopId, length: Array, init_args: typing.Tuple[Eval
        self.body_args = body_args
        if any(self.index in arg.arguments for arg in init_args):
            raise ValueError('the loop initialization arguments must not depend on the index')
        self._invariants, self._dependencies = _dependencies_sans_invariants(Tuple(body_args), self.index)
        super().__init__(args=(length, Tuple(init_args), *self._invariants), *args, **kwargs)
        super().__init__(args=init_args + body_args, *args, **kwargs)

    @cached_property
    def _const_value(self):
        return self.eval()

    @cached_property
    def _const_value(self):
@@ -4459,48 +4378,6 @@ def _compile_block_id(self):
        block_id = self._loop_block_id
        return *block_id[:-1], block_id[-1] + 1

    @cached_property
    def _serialized_loop(self):
        indices = {d: i for i, d in enumerate(itertools.chain([self.index], self._invariants, self._dependencies))}
        return tuple((dep, tuple(map(indices.__getitem__, dep._Evaluable__args))) for dep in self._dependencies)

    @cached_property
    def _serialized_loop_evalf(self):
        return tuple((dep.evalf, indices) for dep, indices in self._serialized_loop)

    def evalf(self, length, init_args, *invariants):
        serialized_evalf = self._serialized_loop_evalf
        output = self.evalf_loop_init(*init_args)
        length = length.__index__()
        # value stack: slot 0 holds the loop index, followed by the
        # precomputed invariants and one slot per serialized operation
        values = [None] + list(invariants) + [None] * len(serialized_evalf)
        with log.context(f'loop {self.index.id}'.replace('{', '{{').replace('}', '}}') + ' {:3.0f}%', 0) as log_ctx:
            fork = parallel.fork(length)
            if fork:
                # parallel path: each worker claims the next iteration from a
                # shared counter; a single lock serializes both the claim and
                # the reduction into `output`
                raw_index = multiprocessing.RawValue('i', 0)
                lock = multiprocessing.Lock()
                with fork as pid:
                    with lock:
                        index = raw_index.value
                        raw_index.value = index + 1
                    while index < length:
                        if not pid:
                            log_ctx(100*index/length)
                        values[0] = numpy.array(index)
                        for o, (op_evalf, indices) in enumerate(serialized_evalf, len(invariants) + 1):
                            values[o] = op_evalf(*[values[i] for i in indices])
                        with lock:
                            self.evalf_loop_body(output, *values[-1])
                            index = raw_index.value
                            raw_index.value = index + 1
            else:
                # sequential fallback
                for index in range(length):
                    values[0] = numpy.array(index)
                    for o, (op_evalf, indices) in enumerate(serialized_evalf, len(invariants) + 1):
                        values[o] = op_evalf(*[values[i] for i in indices])
                    self.evalf_loop_body(output, *values[-1])
                    log_ctx(100*(index+1)/length)
        return output
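The parallel branch above relies on nutils' `parallel.fork` helper, with a `RawValue` counter and a separate `Lock` that also guards the reduction into `output`. The underlying work-distribution pattern, each worker claiming the next loop index from a shared counter, can be sketched with plain `multiprocessing` (hypothetical `worker` and `results` names, squaring as a stand-in loop body; the synchronized `Value` folds counter and lock into one object):

import multiprocessing


def worker(counter, length, results):
    while True:
        with counter.get_lock():
            index = counter.value  # claim the next unprocessed index
            counter.value = index + 1
        if index >= length:
            break
        results[index] = index * index  # stand-in for the serialized loop body


if __name__ == '__main__':
    length = 10
    counter = multiprocessing.Value('i', 0)  # shared iteration counter
    results = multiprocessing.Array('i', length)
    workers = [multiprocessing.Process(target=worker, args=(counter, length, results)) for _ in range(4)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()
    assert list(results) == [i*i for i in range(length)]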

    def _node(self, cache, subgraph, times, unique_loops):
        if (cached := cache.get(self)) is not None:
            return cached
Expand All @@ -4525,6 +4402,10 @@ def _node(self, cache, subgraph, times, unique_loops):
        cache[self] = node = self._node_loop_body(loopcache, loopgraph, looptimes, unique_loops)
        return node

    @cached_property
    def arguments(self):
        # the loop index is internal to the loop and must not propagate into
        # the arguments of the surrounding graph
        return super().arguments - frozenset({self.index})

    @property
    def _loop_deps(self):
        deps = util.IDSet([self])
@@ -4831,28 +4712,6 @@ def _isunique(array):
    return numpy.unique(array).size == array.size


# marker pushed onto the traversal stack to defer appending a node until all
# of its arguments have been processed
_AddDependency = collections.namedtuple('_AddDependency', ['dependency'])


def _dependencies_sans_invariants(func, arg):
    # Partitions the subgraph of `func` into `invariants`, nodes that do not
    # depend on `arg`, and `dependencies`, nodes that do, the latter ordered
    # such that every node is preceded by its index-dependent arguments.
    invariants = []
    dependencies = []
    cache = {arg}  # the index itself belongs to neither list
    stack = [func]
    while stack:
        func_ = stack.pop()
        if isinstance(func_, _AddDependency):
            dependencies.append(func_.dependency)
        elif func_ not in cache:
            cache.add(func_)
            if arg in func_.arguments:
                # revisit this node after its arguments have been handled
                stack.append(_AddDependency(func_))
                stack.extend(func_._Evaluable__args)
            else:
                invariants.append(func_)
    assert (dependencies or invariants or [arg])[-1] == func
    return tuple(invariants), tuple(dependencies)
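A toy run of the partition above, with a hypothetical node type `N` whose `arguments` set contains every transitively reachable node (in nutils proper, `arguments` holds `Argument`-like leaves, of which the loop index is one):

class N:

    def __init__(self, name, *args):
        self.name = name
        self._Evaluable__args = args
        self.arguments = frozenset().union(*[a.arguments | {a} for a in args])


index = N('index')
coef = N('coef')  # does not reach `index`: loop invariant
term = N('term', coef, index)  # depends on the index
body = N('body', term, coef)

invariants, dependencies = _dependencies_sans_invariants(body, index)
assert invariants == (coef,)
assert dependencies == (term, body)  # topologically ordered, ending in `body`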


def _make_loop_ids_unique(funcs: typing.Tuple[Evaluable, ...]) -> typing.Tuple[Evaluable, ...]:
    # Replaces all `_LoopId` instances such that every distinct `Loop` has its
    # own loop id. The loops are traversed depth-first by recursively calling
