[Major] Glocal Modelling v2 (#1008)
* seasonality

* plotting seasonalities fixed

* glocal trend implemented

* glocal trend implemented

* black

* black

* starting glocal trend

* modular code + seasonality with diff global/locals + glocal trend v1 done

* Individual neural nets for future regressors almost done; some tests failing

* Individual neural nets for future regressors done

* shared neural networks for future regressors component

* local seasonality can now be regularised by global seasonality

* typo: using trend config property on seasonality config

* removing variables used for dev

* changing names; final tests before merging

* Update neural_nets.py

* black

* debug yos

* run yos on main

* fix double compute of AR components

* update debug notebooks

---------

Co-authored-by: Oskar Triebe <ourownstory@users.noreply.github.com>
Co-authored-by: leoniewgnr <42536262+leoniewgnr@users.noreply.github.com>
3 people authored Feb 14, 2024
1 parent a99059a commit d4dffe9
Showing 31 changed files with 28,175 additions and 110 deletions.
3 changes: 1 addition & 2 deletions docs/source/conf.py
@@ -14,12 +14,11 @@

import os
import sys
from typing import Any, Dict

import sphinx_fontawesome # noqa: F401
from sphinx.ext.autodoc import between

from typing import Any, Dict

# sys.path.insert(0, os.path.abspath('.'))
sys.path.insert(0, os.path.abspath("../.."))

@@ -787,7 +787,7 @@
"source": [
"m = NeuralProphet(\n",
" trend_global_local=\"local\",\n",
" season_global_local=\"local\",\n",
" season_global_localcal\",\n",
" changepoints_range=0.8,\n",
" epochs=20,\n",
" trend_reg=5,\n",
3,795 changes: 3,795 additions & 0 deletions docs/source/how-to-guides/feature-guides/global_local_modeling_fut_regr.ipynb

Large diffs are not rendered by default.

1,728 changes: 1,728 additions & 0 deletions docs/source/how-to-guides/feature-guides/glocal_trend.ipynb

Large diffs are not rendered by default.

2 changes: 1 addition & 1 deletion docs/source/tutorials/tutorial09.ipynb
@@ -389,7 +389,7 @@
"source": [
"m = NeuralProphet(\n",
" trend_global_local=\"local\",\n",
" season_global_local=\"local\",\n",
" seasonality_global_local=\"local\",\n",
")\n",
"m.set_plotting_backend(\"plotly-static\")\n",
"metrics = m.fit(df, freq=\"H\")\n",
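For orientation, the hunks above reflect this PR's rename of season_global_local to seasonality_global_local. Below is a minimal usage sketch of the glocal configuration these notebooks exercise; the two-series dataframe is hypothetical, while the parameter names are taken from the diffs.

import pandas as pd
from neuralprophet import NeuralProphet

# Hypothetical panel with two series: glocal modelling keys off the "ID" column.
df = pd.DataFrame({
    "ds": list(pd.date_range("2024-01-01", periods=48, freq="H")) * 2,
    "y": list(range(96)),
    "ID": ["series_a"] * 48 + ["series_b"] * 48,
})

m = NeuralProphet(
    trend_global_local="local",        # per-series trend
    seasonality_global_local="local",  # per-series seasonality (renamed in this PR)
    epochs=5,
)
m.set_plotting_backend("plotly-static")
metrics = m.fit(df, freq="H")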
2 changes: 2 additions & 0 deletions neuralprophet/components/future_regressors/linear.py
@@ -1,6 +1,7 @@
import torch
import torch.nn as nn

from neuralprophet import utils

Check failure (GitHub Actions / flake8) on line 4 in neuralprophet/components/future_regressors/linear.py: 'neuralprophet.utils' imported but unused
from neuralprophet.components.future_regressors import FutureRegressors
from neuralprophet.utils_torch import init_parameter

@@ -17,6 +18,7 @@ def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend_none_bool
device=device,
config_trend_none_bool=config_trend_none_bool,
)

if self.regressors_dims is not None:
# Regressor params
self.regressor_params = nn.ParameterDict(
131 changes: 131 additions & 0 deletions neuralprophet/components/future_regressors/neural_nets.py
@@ -0,0 +1,131 @@
from collections import OrderedDict

import torch.nn as nn

from neuralprophet.components.future_regressors import FutureRegressors
from neuralprophet.utils_torch import init_parameter, interprete_model

Check failure (GitHub Actions / flake8) on line 6 in neuralprophet/components/future_regressors/neural_nets.py: 'neuralprophet.utils_torch.init_parameter' imported but unused

# from neuralprophet.utils_torch import init_parameter


class NeuralNetsFutureRegressors(FutureRegressors):
def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend_none_bool):
super().__init__(
config=config,
n_forecasts=n_forecasts,
quantiles=quantiles,
id_list=id_list,
device=device,
config_trend_none_bool=config_trend_none_bool,
)
if self.regressors_dims is not None:
# Regressor params
self.regressor_nets = nn.ModuleDict({})
# TODO: if there are no hidden layers, fall back to the legacy (linear) behavior
self.d_hidden_regressors = config.d_hidden
self.num_hidden_layers_regressors = config.num_hidden_layers
# One net per regressor; to be adapted into a combined network
for regressor in self.regressors_dims.keys():
# Nets for both additive and multiplicative regressors
regressor_net = nn.ModuleList()
# This will later be 1 + (number of static covariates)
d_inputs = 1
for i in range(self.num_hidden_layers_regressors):
regressor_net.append(nn.Linear(d_inputs, self.d_hidden_regressors, bias=True))
d_inputs = self.d_hidden_regressors
# final layer has input size d_inputs and output size equal to no. of forecasts * no. of quantiles
regressor_net.append(nn.Linear(d_inputs, self.n_forecasts * len(self.quantiles), bias=False))
for lay in regressor_net:
nn.init.kaiming_normal_(lay.weight, mode="fan_in")
self.regressor_nets[regressor] = regressor_net

def get_reg_weights(self, name):
"""
Get attributions of regressors component network w.r.t. the model input.
Parameters
----------
name : string
Regressor name
Returns
-------
torch.tensor
Weight corresponding to the given regressor
"""

reg_attributions = interprete_model(
self,
net="regressor_nets",
forward_func="regressor",
_num_in_features=self.regressor_nets[name][0].in_features,
_num_out_features=self.regressor_nets[name][-1].out_features,
additional_forward_args=name,
)

return reg_attributions

def regressor(self, regressor_input, name):
"""Compute single regressor component.
Parameters
----------
regressor_input : torch.Tensor, float
regressor values at corresponding time steps, dims: (batch, n_forecasts, 1)
name : str
Name of regressor, for attribution to corresponding model weights
Returns
-------
torch.Tensor
Forecast component of dims (batch, n_forecasts, num_quantiles)
"""
x = regressor_input
for i in range(self.num_hidden_layers_regressors + 1):
if i > 0:
x = nn.functional.relu(x)
x = self.regressor_nets[name][i](x)

# segment the last dimension to match the quantiles
x = x.reshape(x.shape[0], self.n_forecasts, len(self.quantiles))
return x

def all_regressors(self, regressor_inputs, mode):
"""Compute all regressors components.
Parameters
----------
regressor_inputs : torch.Tensor, float
regressor values at corresponding time steps, dims: (batch, n_forecasts, num_regressors)
mode : str
either "additive" or "multiplicative", selects which regressors are combined
Returns
-------
torch.Tensor
Forecast component of dims (batch, n_forecasts, num_quantiles)
"""
# Select only the regressors whose mode matches the requested mode
regressors_dims_filtered = OrderedDict((k, v) for k, v in self.regressors_dims.items() if v["mode"] == mode)
for i, name in enumerate(regressors_dims_filtered.keys()):
regressor_index = regressors_dims_filtered[name]["regressor_index"]
regressor_input = regressor_inputs[:, :, regressor_index].unsqueeze(dim=2)
if i == 0:
x = self.regressor(regressor_input, name=name)
if i > 0:
x = x + self.regressor(regressor_input, name=name)
return x

def forward(self, inputs, mode, indeces=None):
"""Compute all seasonality components.
Parameters
----------
f_r : torch.Tensor, float
future regressors inputs
mode: string, either "additive" or "multiplicative"
mode of the regressors
Returns
-------
torch.Tensor
Forecast component of dims (batch, n_forecasts, num_quantiles)
"""

if "additive" == mode:
f_r = self.all_regressors(inputs, mode="additive")
if "multiplicative" == mode:
f_r = self.all_regressors(inputs, mode="multiplicative")
return f_r
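To make the shapes in neural_nets.py concrete, here is a standalone sketch of one per-regressor net as assembled in __init__ above; the widths, quantiles, and batch size are made-up values, not taken from the PR.

import torch
import torch.nn as nn

n_forecasts, quantiles, d_hidden, num_hidden_layers = 3, [0.1, 0.5, 0.9], 8, 2
net = nn.ModuleList()
d_inputs = 1  # one scalar input per regressor; static covariates would widen this
for _ in range(num_hidden_layers):
    net.append(nn.Linear(d_inputs, d_hidden, bias=True))
    d_inputs = d_hidden
# final layer: n_forecasts * n_quantiles outputs, as in the diff above
net.append(nn.Linear(d_inputs, n_forecasts * len(quantiles), bias=False))

x = torch.randn(4, n_forecasts, 1)  # (batch, n_forecasts, 1)
for i, layer in enumerate(net):
    if i > 0:
        x = nn.functional.relu(x)
    x = layer(x)
print(x.reshape(4, n_forecasts, len(quantiles)).shape)  # torch.Size([4, 3, 3])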
112 changes: 112 additions & 0 deletions neuralprophet/components/future_regressors/shared_neural_nets.py
@@ -0,0 +1,112 @@
from collections import Counter, OrderedDict

Check failure (GitHub Actions / flake8) on line 1 in neuralprophet/components/future_regressors/shared_neural_nets.py: 'collections.OrderedDict' imported but unused

import torch

Check failure (GitHub Actions / flake8) on line 3 in neuralprophet/components/future_regressors/shared_neural_nets.py: 'torch' imported but unused
import torch.nn as nn

from neuralprophet.components.future_regressors import FutureRegressors
from neuralprophet.utils_torch import init_parameter, interprete_model

Check failure (GitHub Actions / flake8) on line 7 in neuralprophet/components/future_regressors/shared_neural_nets.py: 'neuralprophet.utils_torch.init_parameter' imported but unused

# from neuralprophet.utils_torch import init_parameter


class SharedNeuralNetsFutureRegressors(FutureRegressors):
def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend_none_bool):
super().__init__(
config=config,
n_forecasts=n_forecasts,
quantiles=quantiles,
id_list=id_list,
device=device,
config_trend_none_bool=config_trend_none_bool,
)
if self.regressors_dims is not None:
# Regressor params
self.regressor_nets = nn.ModuleDict({})
# TODO: if there are no hidden layers, fall back to the legacy (linear) behavior
self.d_hidden_regressors = config.d_hidden
self.num_hidden_layers_regressors = config.num_hidden_layers
# Combined network
for net_i, size_i in Counter([x["mode"] for x in self.regressors_dims.values()]).items():
# Nets for both additive and multiplicative regressors
regressor_net = nn.ModuleList()
# This will later be size_i * (1 + number of static covariates)
d_inputs = size_i
for i in range(self.num_hidden_layers_regressors):
regressor_net.append(nn.Linear(d_inputs, self.d_hidden_regressors, bias=True))
d_inputs = self.d_hidden_regressors
# final layer has input size d_inputs and output size equal to no. of forecasts * no. of quantiles
regressor_net.append(nn.Linear(d_inputs, self.n_forecasts * len(self.quantiles), bias=False))
for lay in regressor_net:
nn.init.kaiming_normal_(lay.weight, mode="fan_in")
self.regressor_nets[net_i] = regressor_net

def get_reg_weights(self, name):
"""
Get attributions of regressors component network w.r.t. the model input.
Parameters
----------
name : string
Regressor name
Returns
-------
torch.tensor
Weight corresponding to the given regressor
"""

mode = self.config_regressors.regressors[name].mode
reg_attributions = interprete_model(
self,
net="regressor_nets",
forward_func="regressors_net",
_num_in_features=self.regressor_nets[mode][0].in_features,
_num_out_features=self.regressor_nets[mode][-1].out_features,
additional_forward_args=mode,
)

regressor_index = self.regressors_dims[name]["regressor_index"]
return reg_attributions[:, regressor_index].unsqueeze(-1)

def regressors_net(self, regressor_inputs, mode):
"""Compute single regressor component.
Parameters
----------
regressor_input : torch.Tensor, float
regressor values at corresponding, dims: (batch, n_forecasts, 1)
nam : str
Name of regressor, for attribution to corresponding model weights
Returns
-------
torch.Tensor
Forecast component of dims (batch, n_forecasts, num_quantiles)
"""
x = regressor_inputs
for i in range(self.num_hidden_layers_regressors + 1):
if i > 0:
x = nn.functional.relu(x)
x = self.regressor_nets[mode][i](x)

# segment the last dimension to match the quantiles
x = x.reshape(x.shape[0], self.n_forecasts, len(self.quantiles))
return x

def forward(self, inputs, mode, indeces=None):
"""Compute all seasonality components.
Parameters
----------
f_r : torch.Tensor, float
future regressors inputs
mode: string, either "additive" or "multiplicative"
mode of the regressors
Returns
-------
torch.Tensor
Forecast component of dims (batch, n_forecasts, num_quantiles)
"""

if "additive" == mode:
f_r = self.regressors_net(inputs, mode="additive")
if "multiplicative" == mode:
f_r = self.regressors_net(inputs, mode="multiplicative")
return f_r
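The structural difference from neural_nets.py is the Counter grouping in __init__: instead of one net per regressor, there is one net per mode, with input width equal to the number of regressors sharing that mode. A small sketch with hypothetical regressor names:

from collections import Counter

# Hypothetical regressors_dims: two additive regressors, one multiplicative.
regressors_dims = {
    "temperature": {"mode": "additive", "regressor_index": 0},
    "rainfall": {"mode": "additive", "regressor_index": 1},
    "promotion": {"mode": "multiplicative", "regressor_index": 2},
}
print(Counter(x["mode"] for x in regressors_dims.values()))
# Counter({'additive': 2, 'multiplicative': 1})
# -> the "additive" net gets d_inputs = 2, the "multiplicative" net d_inputs = 1.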