
anypinn.core.nn

Neural network primitives and building blocks for PINN.

ArgsRegistry: TypeAlias = dict[str, Argument] module-attribute

FieldsRegistry: TypeAlias = dict[str, Field] module-attribute

ParamsRegistry: TypeAlias = dict[str, Parameter] module-attribute
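
These registries are plain dictionaries keyed by name. A minimal sketch (import paths are assumed; the wrapped classes and config types are documented below):

# Assumed imports: Argument, Field, Parameter, MLPConfig, ScalarConfig from anypinn.core.
args: ArgsRegistry = {"gamma": Argument(0.1)}

fields: FieldsRegistry = {
    "u": Field(MLPConfig(in_dim=1, out_dim=3, hidden_layers=[32, 32], activation="tanh")),
}

params: ParamsRegistry = {"beta": Parameter(ScalarConfig(init_value=0.3))}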

Argument

A fixed (non-learnable) argument passed to an ODE/PDE function.

Wraps a float constant or a callable and provides a uniform __call__ interface. See also Parameter for the learnable variant.

Parameters:

    value (float | Callable[[Tensor], Tensor]): The value (float) or function (callable). Required.
Example

>>> beta = Argument(0.3)
>>> beta(torch.tensor([1.0]))
tensor(0.3000)
>>> beta_fn = Argument(lambda t: 0.3 * torch.exp(-0.1 * t))
>>> beta_fn(torch.tensor([0.0]))
tensor([0.3000])

Source code in src/anypinn/core/nn.py
class Argument:
    """
    A fixed (non-learnable) argument passed to an ODE/PDE function.

    Wraps a float constant or a callable and provides a uniform
    ``__call__`` interface. See also ``Parameter`` for the learnable
    variant.

    Args:
        value: The value (float) or function (callable).

    Example:
        >>> beta = Argument(0.3)
        >>> beta(torch.tensor([1.0]))
        tensor(0.3000)
        >>> beta_fn = Argument(lambda t: 0.3 * torch.exp(-0.1 * t))
        >>> beta_fn(torch.tensor([0.0]))
        tensor([0.3000])
    """

    def __init__(self, value: float | Callable[[Tensor], Tensor]):
        self._value: float | Callable[[Tensor], Tensor] = value
        self._callable = callable(value) and not isinstance(value, (int, float))
        self._tensor_cache: dict[torch.device, Tensor] = {}

    def __call__(self, x: Tensor) -> Tensor:
        """
        Evaluate the argument.

        Args:
            x: Input tensor (context).

        Returns:
            The value of the argument, broadcasted if necessary.
        """
        if self._callable:
            fn = cast(Callable[[Tensor], Tensor], self._value)
            return fn(x)
        device = x.device
        if device not in self._tensor_cache:
            self._tensor_cache[device] = torch.tensor(self._value, device=device)
        return self._tensor_cache[device]

    @override
    def __repr__(self) -> str:
        return f"Argument(value={self._value})"

__call__(x: Tensor) -> Tensor

Evaluate the argument.

Parameters:

    x (Tensor): Input tensor (context). Required.

Returns:

    Tensor: The value of the argument, broadcasted if necessary.

Source code in src/anypinn/core/nn.py
def __call__(self, x: Tensor) -> Tensor:
    """
    Evaluate the argument.

    Args:
        x: Input tensor (context).

    Returns:
        The value of the argument, broadcasted if necessary.
    """
    if self._callable:
        fn = cast(Callable[[Tensor], Tensor], self._value)
        return fn(x)
    device = x.device
    if device not in self._tensor_cache:
        self._tensor_cache[device] = torch.tensor(self._value, device=device)
    return self._tensor_cache[device]

__init__(value: float | Callable[[Tensor], Tensor])

Source code in src/anypinn/core/nn.py
def __init__(self, value: float | Callable[[Tensor], Tensor]):
    self._value: float | Callable[[Tensor], Tensor] = value
    self._callable = callable(value) and not isinstance(value, (int, float))
    self._tensor_cache: dict[torch.device, Tensor] = {}

__repr__() -> str

Source code in src/anypinn/core/nn.py
@override
def __repr__(self) -> str:
    return f"Argument(value={self._value})"

Domain dataclass

N-dimensional rectangular domain.

Attributes:

Name Type Description
bounds list[tuple[float, float]]

Per-dimension (min, max) pairs. bounds[i] covers dimension i.

dx list[float] | None

Per-dimension step size (None when not applicable).

Source code in src/anypinn/core/nn.py
@dataclass
class Domain:
    """
    N-dimensional rectangular domain.

    Attributes:
        bounds: Per-dimension (min, max) pairs. ``bounds[i]`` covers dimension i.
        dx: Per-dimension step size (``None`` when not applicable).
    """

    bounds: list[tuple[float, float]]
    dx: list[float] | None = None

    @property
    def ndim(self) -> int:
        """Number of spatial dimensions."""
        return len(self.bounds)

    @property
    def x0(self) -> float:
        """Lower bound of the first dimension (convenience for 1-D / time-axis access)."""
        return self.bounds[0][0]

    @property
    def x1(self) -> float:
        """Upper bound of the first dimension."""
        return self.bounds[0][1]

    @classmethod
    def from_x(cls, x: Tensor) -> Domain:
        """
        Infer domain bounds and step sizes from a coordinate tensor of shape (N, d).

        Args:
            x: Coordinate tensor of shape ``(N, d)``.

        Returns:
            Domain with bounds and dx inferred from the data.

        Example:
            >>> coords = torch.linspace(0, 10, 100).unsqueeze(1)
            >>> domain = Domain.from_x(coords)
            >>> domain.x0, domain.x1
            (0.0, 10.0)
        """
        if x.ndim != 2:
            raise ValueError(f"Expected 2-D coordinate tensor (N, d), got shape {tuple(x.shape)}.")
        if x.shape[0] < 2:
            raise ValueError(
                f"At least two points are required to infer the domain, got {x.shape[0]}."
            )

        d = x.shape[1]
        bounds = [(x[:, i].min().item(), x[:, i].max().item()) for i in range(d)]
        dx = [(x[1, i] - x[0, i]).item() for i in range(d)]
        return cls(bounds=bounds, dx=dx)

    @override
    def __repr__(self) -> str:
        return f"Domain(ndim={self.ndim}, bounds={self.bounds}, dx={self.dx})"

bounds: list[tuple[float, float]] instance-attribute

dx: list[float] | None = None class-attribute instance-attribute

ndim: int property

Number of spatial dimensions.

x0: float property

Lower bound of the first dimension (convenience for 1-D / time-axis access).

x1: float property

Upper bound of the first dimension.

__init__(bounds: list[tuple[float, float]], dx: list[float] | None = None) -> None

__repr__() -> str

Source code in src/anypinn/core/nn.py
@override
def __repr__(self) -> str:
    return f"Domain(ndim={self.ndim}, bounds={self.bounds}, dx={self.dx})"

from_x(x: Tensor) -> Domain classmethod

Infer domain bounds and step sizes from a coordinate tensor of shape (N, d).

Parameters:

    x (Tensor): Coordinate tensor of shape (N, d). Required.

Returns:

    Domain: Domain with bounds and dx inferred from the data.

Example

>>> coords = torch.linspace(0, 10, 100).unsqueeze(1)
>>> domain = Domain.from_x(coords)
>>> domain.x0, domain.x1
(0.0, 10.0)

Source code in src/anypinn/core/nn.py
@classmethod
def from_x(cls, x: Tensor) -> Domain:
    """
    Infer domain bounds and step sizes from a coordinate tensor of shape (N, d).

    Args:
        x: Coordinate tensor of shape ``(N, d)``.

    Returns:
        Domain with bounds and dx inferred from the data.

    Example:
        >>> coords = torch.linspace(0, 10, 100).unsqueeze(1)
        >>> domain = Domain.from_x(coords)
        >>> domain.x0, domain.x1
        (0.0, 10.0)
    """
    if x.ndim != 2:
        raise ValueError(f"Expected 2-D coordinate tensor (N, d), got shape {tuple(x.shape)}.")
    if x.shape[0] < 2:
        raise ValueError(
            f"At least two points are required to infer the domain, got {x.shape[0]}."
        )

    d = x.shape[1]
    bounds = [(x[:, i].min().item(), x[:, i].max().item()) for i in range(d)]
    dx = [(x[1, i] - x[0, i]).item() for i in range(d)]
    return cls(bounds=bounds, dx=dx)
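
Besides from_x, a Domain can be constructed directly from its bounds; for example:

# 2-D rectangle: x in [0, 1], t in [0, 10]; dx stays None when not needed.
domain = Domain(bounds=[(0.0, 1.0), (0.0, 10.0)])

domain.ndim             # 2
domain.x0, domain.x1    # (0.0, 1.0) -- first dimension only
repr(domain)            # "Domain(ndim=2, bounds=[(0.0, 1.0), (0.0, 10.0)], dx=None)"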

Field

Bases: Module

A neural field mapping coordinates to a vector of state variables.

For an ODE this maps t -> [S, I, R]; for a PDE it maps (x, t) -> u(x, t).

Parameters:

    config (MLPConfig): Configuration for the MLP backing this field. Required.
Example

>>> field = Field(MLPConfig(
...     in_dim=1, out_dim=3,
...     hidden_layers=[32, 32],
...     activation="tanh",
... ))
>>> t = torch.rand(10, 1)
>>> field(t).shape
torch.Size([10, 3])

Source code in src/anypinn/core/nn.py
class Field(nn.Module):
    """
    A neural field mapping coordinates to a vector of state variables.

    For an ODE this maps ``t -> [S, I, R]``; for a PDE it maps
    ``(x, t) -> u(x, t)``.

    Args:
        config: Configuration for the MLP backing this field.

    Example:
        >>> field = Field(MLPConfig(
        ...     in_dim=1, out_dim=3,
        ...     hidden_layers=[32, 32],
        ...     activation="tanh",
        ... ))
        >>> t = torch.rand(10, 1)
        >>> field(t).shape
        torch.Size([10, 3])
    """

    def __init__(
        self,
        config: MLPConfig,
    ):
        super().__init__()
        encode = config.encode
        if isinstance(encode, nn.Module):
            # registers → participates in .to(), .state_dict()
            self.encoder: nn.Module | None = encode
        else:
            self.encoder = None
        self._encode_fn = encode  # callable reference (module or plain fn)
        dims = [config.in_dim] + config.hidden_layers + [config.out_dim]
        act = get_activation(config.activation)

        layers: list[nn.Module] = []
        for i in range(len(dims) - 1):
            layers.append(nn.Linear(dims[i], dims[i + 1]))
            if i < len(dims) - 2:
                layers.append(act)

        if config.output_activation is not None:
            out_act = get_activation(config.output_activation)
            layers.append(out_act)

        self.net = nn.Sequential(*layers)
        self.apply(self._init)

    @staticmethod
    def _init(m: nn.Module) -> None:
        if isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight)
            nn.init.zeros_(m.bias)

    @override
    def forward(self, x: Tensor) -> Tensor:
        """
        Forward pass of the field.

        Args:
            x: Input coordinates (e.g. time, space).

        Returns:
            The values of the field at input coordinates.
        """
        if self._encode_fn is not None:
            x = self._encode_fn(x)
        return cast(Tensor, self.net(x))

encoder: nn.Module | None = encode instance-attribute

net = nn.Sequential(*layers) instance-attribute

__init__(config: MLPConfig)

Source code in src/anypinn/core/nn.py
def __init__(
    self,
    config: MLPConfig,
):
    super().__init__()
    encode = config.encode
    if isinstance(encode, nn.Module):
        # registers → participates in .to(), .state_dict()
        self.encoder: nn.Module | None = encode
    else:
        self.encoder = None
    self._encode_fn = encode  # callable reference (module or plain fn)
    dims = [config.in_dim] + config.hidden_layers + [config.out_dim]
    act = get_activation(config.activation)

    layers: list[nn.Module] = []
    for i in range(len(dims) - 1):
        layers.append(nn.Linear(dims[i], dims[i + 1]))
        if i < len(dims) - 2:
            layers.append(act)

    if config.output_activation is not None:
        out_act = get_activation(config.output_activation)
        layers.append(out_act)

    self.net = nn.Sequential(*layers)
    self.apply(self._init)

forward(x: Tensor) -> Tensor

Forward pass of the field.

Parameters:

    x (Tensor): Input coordinates (e.g. time, space). Required.

Returns:

    Tensor: The values of the field at input coordinates.

Source code in src/anypinn/core/nn.py
@override
def forward(self, x: Tensor) -> Tensor:
    """
    Forward pass of the field.

    Args:
        x: Input coordinates (e.g. time, space).

    Returns:
        The values of the field at input coordinates.
    """
    if self._encode_fn is not None:
        x = self._encode_fn(x)
    return cast(Tensor, self.net(x))
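
Because forward applies config.encode (when set) before the MLP, the field can be paired with an input encoding. The sketch below assumes MLPConfig accepts an encode callable, as suggested by the constructor above; the Fourier-style feature map is purely illustrative:

import torch
from torch import Tensor

def fourier_encode(x: Tensor) -> Tensor:
    # Illustrative encoding; doubles the feature width of a 1-D input.
    return torch.cat([torch.sin(x), torch.cos(x)], dim=-1)

# in_dim must match the encoder's output width (2 here), not the raw input width.
field = Field(MLPConfig(
    in_dim=2, out_dim=1,
    hidden_layers=[32, 32],
    activation="tanh",
    encode=fourier_encode,
))
field(torch.rand(10, 1)).shape   # torch.Size([10, 1])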

Parameter

Bases: Module, Argument

A learnable parameter that participates in gradient optimization.

Supports scalar parameters (a single trainable value) or function-valued parameters (e.g. beta(t)) backed by a small MLP. Because Parameter is a subclass of Argument, it can be used anywhere an Argument is expected.

Parameters:

    config (ScalarConfig | MLPConfig): Configuration for the parameter (ScalarConfig or MLPConfig). Required.
Example

>>> # Scalar parameter starting at 0.3
>>> beta = Parameter(ScalarConfig(init_value=0.3))
>>> beta(torch.tensor([1.0]))  # returns ~0.3
>>> # Function-valued parameter beta(t)
>>> beta_t = Parameter(MLPConfig(
...     in_dim=1, out_dim=1,
...     hidden_layers=[8],
...     activation="tanh",
... ))

Source code in src/anypinn/core/nn.py
class Parameter(nn.Module, Argument):
    """
    A learnable parameter that participates in gradient optimization.

    Supports scalar parameters (a single trainable value) or
    function-valued parameters (e.g. beta(t)) backed by a small MLP.
    Because ``Parameter`` is a subclass of ``Argument``, it can be
    used anywhere an ``Argument`` is expected.

    Args:
        config: Configuration for the parameter (ScalarConfig or MLPConfig).

    Example:
        >>> # Scalar parameter starting at 0.3
        >>> beta = Parameter(ScalarConfig(init_value=0.3))
        >>> beta(torch.tensor([1.0]))  # returns ~0.3
        >>> # Function-valued parameter beta(t)
        >>> beta_t = Parameter(MLPConfig(
        ...     in_dim=1, out_dim=1,
        ...     hidden_layers=[8],
        ...     activation="tanh",
        ... ))
    """

    def __init__(
        self,
        config: ScalarConfig | MLPConfig,
    ):
        super().__init__()
        self.config = config
        self._mode: Literal["scalar", "mlp"]

        if isinstance(config, ScalarConfig):
            self._mode = "scalar"
            self.value = nn.Parameter(torch.tensor(float(config.init_value), dtype=torch.float32))

        else:  # isinstance(config, MLPConfig)
            self._mode = "mlp"
            dims = [config.in_dim] + config.hidden_layers + [config.out_dim]
            act = get_activation(config.activation)

            layers: list[nn.Module] = []
            for i in range(len(dims) - 1):
                layers.append(nn.Linear(dims[i], dims[i + 1]))
                if i < len(dims) - 2:
                    layers.append(act)

            if config.output_activation is not None:
                out_act = get_activation(config.output_activation)
                layers.append(out_act)

            self.net = nn.Sequential(*layers)
            self.apply(self._init)

    @property
    def mode(self) -> Literal["scalar", "mlp"]:
        """Mode of the parameter: 'scalar' or 'mlp'."""
        return self._mode

    @staticmethod
    def _init(m: nn.Module) -> None:
        if isinstance(m, nn.Linear):
            nn.init.xavier_normal_(m.weight)
            nn.init.zeros_(m.bias)

    @override
    def forward(self, x: Tensor | None = None) -> Tensor:
        """
        Get the value of the parameter.

        Args:
            x: Input tensor (required for 'mlp' mode).

        Returns:
            The parameter value.
        """
        if self.mode == "scalar":
            return self.value if x is None else self.value.expand_as(x)
        else:
            if x is None:
                raise TypeError("Function-valued parameter requires input.")
            return cast(Tensor, self.net(x))

config = config instance-attribute

mode: Literal['scalar', 'mlp'] property

Mode of the parameter: 'scalar' or 'mlp'.

net = nn.Sequential(*layers) instance-attribute

value = nn.Parameter(torch.tensor(float(config.init_value), dtype=torch.float32)) instance-attribute

__init__(config: ScalarConfig | MLPConfig)

Source code in src/anypinn/core/nn.py
def __init__(
    self,
    config: ScalarConfig | MLPConfig,
):
    super().__init__()
    self.config = config
    self._mode: Literal["scalar", "mlp"]

    if isinstance(config, ScalarConfig):
        self._mode = "scalar"
        self.value = nn.Parameter(torch.tensor(float(config.init_value), dtype=torch.float32))

    else:  # isinstance(config, MLPConfig)
        self._mode = "mlp"
        dims = [config.in_dim] + config.hidden_layers + [config.out_dim]
        act = get_activation(config.activation)

        layers: list[nn.Module] = []
        for i in range(len(dims) - 1):
            layers.append(nn.Linear(dims[i], dims[i + 1]))
            if i < len(dims) - 2:
                layers.append(act)

        if config.output_activation is not None:
            out_act = get_activation(config.output_activation)
            layers.append(out_act)

        self.net = nn.Sequential(*layers)
        self.apply(self._init)

forward(x: Tensor | None = None) -> Tensor

Get the value of the parameter.

Parameters:

    x (Tensor | None): Input tensor (required for 'mlp' mode). Default: None.

Returns:

    Tensor: The parameter value.

Source code in src/anypinn/core/nn.py
@override
def forward(self, x: Tensor | None = None) -> Tensor:
    """
    Get the value of the parameter.

    Args:
        x: Input tensor (required for 'mlp' mode).

    Returns:
        The parameter value.
    """
    if self.mode == "scalar":
        return self.value if x is None else self.value.expand_as(x)
    else:
        if x is None:
            raise TypeError("Function-valued parameter requires input.")
        return cast(Tensor, self.net(x))
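
Putting the two modes side by side (a sketch; only the config fields shown in the source above are assumed):

import torch

t = torch.linspace(0.0, 10.0, 20).unsqueeze(1)

# Scalar mode: a single trainable value, broadcast to the input's shape when one is given.
beta = Parameter(ScalarConfig(init_value=0.3))
beta().item()       # ~0.3
beta(t).shape       # torch.Size([20, 1])

# MLP mode: a function-valued parameter beta(t); an input tensor is required.
beta_t = Parameter(MLPConfig(in_dim=1, out_dim=1, hidden_layers=[8], activation="tanh"))
beta_t(t).shape     # torch.Size([20, 1])

# Both are nn.Modules, so their parameters register with an optimizer as usual.
opt = torch.optim.Adam([*beta.parameters(), *beta_t.parameters()], lr=1e-3)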

build_criterion(name: Criteria) -> nn.Module

Return the loss-criterion module for the given name.

Parameters:

    name (Criteria): One of "mse", "huber", "l1". Required.

Returns:

    Module: The corresponding PyTorch loss module.

Source code in src/anypinn/core/nn.py
def build_criterion(name: Criteria) -> nn.Module:
    """
    Return the loss-criterion module for the given name.

    Args:
        name: One of ``"mse"``, ``"huber"``, ``"l1"``.

    Returns:
        The corresponding PyTorch loss module.
    """
    return {
        "mse": nn.MSELoss(),
        "huber": nn.HuberLoss(),
        "l1": nn.L1Loss(),
    }[name]
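
Usage is a plain name-to-module lookup, e.g.:

import torch

criterion = build_criterion("huber")                      # nn.HuberLoss()
loss = criterion(torch.randn(8, 3), torch.randn(8, 3))    # scalar tensor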

get_activation(name: Activations) -> nn.Module

Get the activation function module by name.

Parameters:

    name (Activations): The name of the activation function. Required.

Returns:

    Module: The PyTorch activation module.

Source code in src/anypinn/core/nn.py
def get_activation(name: Activations) -> nn.Module:
    """
    Get the activation function module by name.

    Args:
        name: The name of the activation function.

    Returns:
        The PyTorch activation module.
    """
    return {
        "tanh": nn.Tanh(),
        "relu": nn.ReLU(),
        "leaky_relu": nn.LeakyReLU(),
        "sigmoid": nn.Sigmoid(),
        "selu": nn.SELU(),
        "softplus": nn.Softplus(),
        "identity": nn.Identity(),
    }[name]
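
For example:

import torch

act = get_activation("softplus")   # nn.Softplus()
act(torch.randn(4)).shape          # torch.Size([4]) -- applied elementwise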