Skip to content

anypinn.cli.scaffold.lorenz.ode_synthetic

Lorenz system — mathematical definition.

BETA_KEY = 'beta' module-attribute

NOISE_STD = 0.5 module-attribute

RHO_KEY = 'rho' module-attribute

SCALE = 20.0 module-attribute

SIGMA_KEY = 'sigma' module-attribute

TRUE_BETA = 8.0 / 3.0 module-attribute

TRUE_RHO = 28.0 module-attribute

TRUE_SIGMA = 10.0 module-attribute

T_TOTAL = 3 module-attribute

X0 = -8.0 module-attribute

X_KEY = 'x' module-attribute

Y0 = 7.0 module-attribute

Y_KEY = 'y' module-attribute

Z0 = 27.0 module-attribute

Z_KEY = 'z' module-attribute

validation: ValidationRegistry = {SIGMA_KEY: lambda x: torch.full_like(x, TRUE_SIGMA), RHO_KEY: lambda x: torch.full_like(x, TRUE_RHO), BETA_KEY: lambda x: torch.full_like(x, TRUE_BETA)} module-attribute

create_data_module(hp: ODEHyperparameters)

Source code in src/anypinn/cli/scaffold/lorenz/ode_synthetic.py
def create_data_module(hp: ODEHyperparameters):
    """Build the synthetic Lorenz data module for training.

    Integrates the raw (unscaled) Lorenz system from the initial state
    (X0, Y0, Z0) using the true coefficient values, then hands the result to
    ``LorenzDataModule`` together with the noise level ``NOISE_STD``, the
    module-level ``validation`` registry, and a ``DataScaling`` callback that
    divides every state channel by ``SCALE``.
    """
    from anypinn.catalog.lorenz import LorenzDataModule

    def _raw_lorenz(t: Tensor, state: Tensor, ode_args: ArgsRegistry) -> Tensor:
        # Classic Lorenz right-hand side in physical (unscaled) units.
        u, v, w = state
        sigma_fn = ode_args[SIGMA_KEY]
        rho_fn = ode_args[RHO_KEY]
        beta_fn = ode_args[BETA_KEY]
        return torch.stack(
            [
                sigma_fn(t) * (v - u),
                u * (rho_fn(t) - w) - v,
                u * v - beta_fn(t) * w,
            ]
        )

    generator_props = ODEProperties(
        ode=_raw_lorenz,
        y0=torch.tensor([X0, Y0, Z0]),
        args={
            SIGMA_KEY: Argument(TRUE_SIGMA),
            RHO_KEY: Argument(TRUE_RHO),
            BETA_KEY: Argument(TRUE_BETA),
        },
    )

    # One uniform 1/SCALE factor per state channel (x, y, z).
    scaling = DataScaling(y_scale=[1 / SCALE] * 3)

    return LorenzDataModule(
        hp=hp,
        gen_props=generator_props,
        noise_std=NOISE_STD,
        validation=validation,
        callbacks=[scaling],
    )

create_problem(hp: ODEHyperparameters) -> ODEInverseProblem

Source code in src/anypinn/cli/scaffold/lorenz/ode_synthetic.py
def create_problem(hp: ODEHyperparameters) -> ODEInverseProblem:
    """Assemble the scaled Lorenz inverse problem.

    One neural field per state variable (x, y, z) and one learnable
    parameter per Lorenz coefficient (sigma, rho, beta), wired to the
    scaled right-hand side ``lorenz_scaled`` with the initial condition
    expressed in scaled units.
    """
    state_keys = (X_KEY, Y_KEY, Z_KEY)
    coeff_keys = (SIGMA_KEY, RHO_KEY, BETA_KEY)

    props = ODEProperties(
        ode=lorenz_scaled,
        # Initial condition in scaled units, matching the DataScaling factor.
        y0=torch.tensor([X0, Y0, Z0]) / SCALE,
        args={},
    )

    fields = FieldsRegistry(
        {key: Field(config=hp.fields_config) for key in state_keys}
    )
    params = ParamsRegistry(
        {key: Parameter(config=hp.params_config) for key in coeff_keys}
    )

    def predict_data(x_data: Tensor, fields: FieldsRegistry, _params: ParamsRegistry) -> Tensor:
        # Evaluate each state field at the data times; stack as columns
        # so the output is (n_points, 3).
        return torch.stack([fields[key](x_data) for key in state_keys], dim=1)

    return ODEInverseProblem(
        props=props,
        hp=hp,
        fields=fields,
        params=params,
        predict_data=predict_data,
    )

lorenz_scaled(x: Tensor, y: Tensor, args: ArgsRegistry) -> Tensor

Scaled Lorenz ODE. States pre-divided by SCALE, time by T_TOTAL.

Source code in src/anypinn/cli/scaffold/lorenz/ode_synthetic.py
def lorenz_scaled(x: Tensor, y: Tensor, args: ArgsRegistry) -> Tensor:
    """Scaled Lorenz ODE. States pre-divided by SCALE, time by T_TOTAL.

    With X = x/SCALE, Y = y/SCALE, Z = z/SCALE and s = t/T_TOTAL, the
    physical system

        dx/dt = sigma * (y - x)
        dy/dt = x * (rho - z) - y
        dz/dt = x * y - beta * z

    transforms exactly to

        dX/ds = T_TOTAL * sigma * (Y - X)
        dY/ds = T_TOTAL * (X * (rho - SCALE * Z) - Y)
        dZ/ds = T_TOTAL * (SCALE * X * Y - beta * Z)

    Args:
        x: scaled time points (s).
        y: scaled state (X, Y, Z), unpackable along its first dimension.
        args: registry of callables for sigma, rho, beta (each evaluated at x).

    Returns:
        Stacked derivatives (dX/ds, dY/ds, dZ/ds) as one tensor.
    """
    lx, ly, lz = y
    sigma = args[SIGMA_KEY]
    rho = args[RHO_KEY]
    beta = args[BETA_KEY]

    dx = sigma(x) * (ly - lx)
    # BUGFIX: the x*z coupling must carry a SCALE factor under state scaling
    # (mirroring the `* SCALE` on the x*y product in dz below). The previous
    # form `(rho(x) / SCALE - lz)` divided the whole coupling term by SCALE,
    # biasing the recovered rho away from TRUE_RHO.
    dy = lx * (rho(x) - SCALE * lz) - ly
    dz = lx * ly * SCALE - beta(x) * lz

    # Undo the time nondimensionalization: d/ds = T_TOTAL * d/dt.
    dx = dx * T_TOTAL
    dy = dy * T_TOTAL
    dz = dz * T_TOTAL
    return torch.stack([dx, dy, dz])