Source code for devito.builtins

"""
Built-in Operators provided by Devito.
"""

from sympy import Abs, Pow
import numpy as np

import devito as dv

__all__ = ['assign', 'smooth', 'norm', 'sumall', 'inner', 'mmin', 'mmax']

def assign(f, v=0):
    """
    Assign a value to a Function.

    Parameters
    ----------
    f : Function
        The left-hand side of the assignment.
    v : scalar, optional
        The right-hand side of the assignment. Defaults to 0.
    """
    # Build and immediately run a trivial Operator implementing `f[...] = v`
    dv.Operator(dv.Eq(f, v), name='assign')()

def smooth(f, g, axis=None):
    """
    Smooth a Function through simple moving average.

    Parameters
    ----------
    f : Function
        The left-hand side of the smoothing kernel, that is the smoothed Function.
    g : Function
        The right-hand side of the smoothing kernel, that is the Function being smoothed.
    axis : Dimension or list of Dimensions, optional
        The Dimension along which the smoothing operation is performed. Defaults
        to ``g``'s innermost Dimension.

    Notes
    -----
    https://en.wikipedia.org/wiki/Moving_average#Simple_moving_average
    """
    if g.is_Constant:
        # Return a scaled version of the input if it's a Constant
        f.data[:] = .9 * g.data
    else:
        if axis is None:
            # Default to the innermost Dimension of the smoothed Function
            axis = g.dimensions[-1]
        dv.Operator(dv.Eq(f, g.avg(dims=axis)), name='smoother')()

# Reduction-inducing builtins

class MPIReduction(object):
    """
    A context manager to build MPI-aware reduction Operators.

    On entry, allocates a single-element accumulator Function ``self.n``;
    on exit, reduces it across all MPI ranks (if running with MPI) and
    stores the result in ``self.v``.
    """

    def __init__(self, *functions, op=dv.mpi.MPI.SUM):
        # All input Functions must live on the same Grid (or have none)
        grids = {f.grid for f in functions}
        if len(grids) == 0:
            self.grid = None
        elif len(grids) == 1:
            self.grid = grids.pop()
        else:
            raise ValueError("Multiple Grids found")
        # Mixed dtypes would make the reduction ill-defined
        dtype = {f.dtype for f in functions}
        if len(dtype) == 1:
            self.dtype = dtype.pop()
        else:
            raise ValueError("Illegal mixed data types")
        # `self.v` holds the reduced value, populated in __exit__
        self.v = None
        self.op = op

    def __enter__(self):
        # Single-element accumulator over a dedicated Dimension
        i = dv.Dimension(name='i',)
        self.n = dv.Function(name='n', shape=(1,), dimensions=(i,),
                             grid=self.grid, dtype=self.dtype)
        self.n.data = 0
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        if self.grid is None or not dv.configuration['mpi']:
            # Serial execution: the local accumulator is the answer
            assert self.n.data.size == 1
            self.v = self.n.data
        else:
            # Reduce the rank-local partial values across the communicator
            comm = self.grid.distributor.comm
            self.v = comm.allreduce(np.asarray(self.n.data), self.op)

def norm(f, order=2):
    """
    Compute the norm of a Function.

    Parameters
    ----------
    f : Function
        Input Function.
    order : int, optional
        The order of the norm. Defaults to 2.
    """
    kwargs = {}
    if f.is_TimeFunction and f._time_buffering:
        # Restrict the time loop to the buffered time slots
        kwargs[f.time_dim.max_name] = f._time_size - 1

    # Protect SparseFunctions from accessing duplicated (out-of-domain) data,
    # otherwise we would eventually be summing more than expected
    p, eqns = f.guard() if f.is_SparseFunction else (f, [])

    with MPIReduction(f) as mr:
        op = dv.Operator(eqns + [dv.Inc(mr.n, Abs(Pow(p, order)))],
                         name='norm%d' % order)
        op.apply(**kwargs)

    v = Pow(mr.v, 1/order)

    # `np.float` was removed in NumPy 1.24; the builtin `float` is equivalent
    return float(v)

def sumall(f):
    """
    Compute the sum of all Function data.

    Parameters
    ----------
    f : Function
        Input Function.
    """
    kwargs = {}
    if f.is_TimeFunction and f._time_buffering:
        # Restrict the time loop to the buffered time slots
        kwargs[f.time_dim.max_name] = f._time_size - 1

    # Protect SparseFunctions from accessing duplicated (out-of-domain) data,
    # otherwise we would eventually be summing more than expected
    p, eqns = f.guard() if f.is_SparseFunction else (f, [])

    with MPIReduction(f) as mr:
        op = dv.Operator(eqns + [dv.Inc(mr.n, p)], name='sum')
        op.apply(**kwargs)

    # `np.float` was removed in NumPy 1.24; the builtin `float` is equivalent
    return float(mr.v)

def inner(f, g):
    """
    Inner product of two Functions.

    Parameters
    ----------
    f : Function
        First input operand
    g : Function
        Second input operand

    Raises
    ------
    ValueError
        If the two input Functions are defined over different grids, or have
        different dimensionality, or their dimension-wise sizes don't match.
        If in input are two SparseFunctions and their coordinates don't match,
        the exception is raised.

    Notes
    -----
    The inner product is the sum of all dimension-wise products. For 1D Functions,
    the inner product corresponds to the dot product.
    """
    # Input check
    if f.is_TimeFunction and f._time_buffering != g._time_buffering:
        raise ValueError("Cannot compute `inner` between save/nosave TimeFunctions")
    if f.shape != g.shape:
        raise ValueError("`f` and `g` must have same shape")
    if f._data is None or g._data is None:
        raise ValueError("Uninitialized input")
    if f.is_SparseFunction and not np.all(f.coordinates_data == g.coordinates_data):
        raise ValueError("Non-matching coordinates")

    kwargs = {}
    if f.is_TimeFunction and f._time_buffering:
        # Restrict the time loop to the buffered time slots
        kwargs[f.time_dim.max_name] = f._time_size - 1

    # Protect SparseFunctions from accessing duplicated (out-of-domain) data,
    # otherwise we would eventually be summing more than expected
    rhs, eqns = f.guard(f*g) if f.is_SparseFunction else (f*g, [])

    with MPIReduction(f, g) as mr:
        op = dv.Operator(eqns + [dv.Inc(mr.n, rhs)], name='inner')
        op.apply(**kwargs)

    # `np.float` was removed in NumPy 1.24; the builtin `float` is equivalent
    return float(mr.v)

def mmin(f):
    """
    Retrieve the minimum.

    Parameters
    ----------
    f : array_like or Function
        Input operand.
    """
    if not isinstance(f, dv.Function):
        # Plain array-like input: no MPI decomposition to worry about
        return np.min(f)

    with MPIReduction(f, op=dv.mpi.MPI.MIN) as mr:
        # Rank-local minimum over the domain region only (no halo)
        mr.n.data = np.min(f.data_ro_domain).item()
    # `mr.v` is only populated by MPIReduction.__exit__, hence outside the `with`
    return mr.v.item()

def mmax(f):
    """
    Retrieve the maximum.

    Parameters
    ----------
    f : array_like or Function
        Input operand.
    """
    if not isinstance(f, dv.Function):
        # Plain array-like input: no MPI decomposition to worry about
        return np.max(f)

    with MPIReduction(f, op=dv.mpi.MPI.MAX) as mr:
        # Rank-local maximum over the domain region only (no halo)
        mr.n.data = np.max(f.data_ro_domain).item()
    # `mr.v` is only populated by MPIReduction.__exit__, hence outside the `with`
    return mr.v.item()