Skip to content

Inference Wrapper

NumPy-in/NumPy-out inference for trained Learners.

InferenceWrapper

InferenceWrapper(learner, device: str | device = 'cpu')

NumPy-in/NumPy-out inference for trained Learners.

Reconstructs the training-time input pipeline (including prediction_concat concatenation) so models get the same input format they saw during training.

Parameters:

Name Type Description Default
learner

trained Learner with model and dls

required
device str | device

device for inference ('cpu', 'cuda')

'cpu'
Source code in tsfast/inference/wrapper.py
def __init__(
    self,
    learner,
    device: str | torch.device = "cpu",
):
    """Wrap a trained Learner for NumPy-in/NumPy-out inference.

    Args:
        learner: trained Learner exposing ``model`` and ``dls``
        device: device spec for inference ('cpu', 'cuda')

    Raises:
        TypeError: if ``learner`` lacks ``model`` or ``dls``.
    """
    is_valid_learner = hasattr(learner, "model") and hasattr(learner, "dls")
    if not is_valid_learner:
        raise TypeError("Input 'learner' must be a valid Learner with model and dls.")
    self.device = torch.device(device)
    # Move to the target device and switch to eval mode for inference.
    self.model = learner.model.to(self.device).eval()
    # Recover the training-time prediction_concat callback (if any) so
    # inputs can be rebuilt exactly as the model saw them in training.
    self._pred_cb = _find_prediction_concat(learner)
    self._n_model_inputs = _get_n_model_inputs(self.model)

inference

inference(np_input: ndarray, np_output_init: ndarray | None = None) -> np.ndarray

Run inference on numpy input, returns numpy output.

Output shape mirrors input dimensionality:

- 1D (seq_len,) → 1D (seq_len,) (single-feature output only)
- 2D (seq_len, features) → 2D (seq_len, out_features)
- 3D (batch, seq_len, features) → 3D (batch, seq_len, out_features)

Parameters:

Name Type Description Default
np_input ndarray

input time series (u)

required
np_output_init ndarray | None

initial output series (y_init), required if trained with prediction_concat

None
Source code in tsfast/inference/wrapper.py
@torch.no_grad()
def inference(
    self,
    np_input: np.ndarray,
    np_output_init: np.ndarray | None = None,
) -> np.ndarray:
    """Run inference on numpy input, returns numpy output.

    Output shape mirrors input dimensionality:
    - 1D ``(seq_len,)`` → 1D ``(seq_len,)`` (single-feature output only)
    - 2D ``(seq_len, features)`` → 2D ``(seq_len, out_features)``
    - 3D ``(batch, seq_len, features)`` → 3D ``(batch, seq_len, out_features)``

    Args:
        np_input: input time series (u)
        np_output_init: initial output series (y_init), required if trained
            with prediction_concat
    """
    input_ndim = np_input.ndim
    u_tensor = self._prepare_tensor(np_input, "np_input")
    input_seq_len = u_tensor.shape[1]

    if np_output_init is not None:
        y_init_tensor = self._prepare_tensor(np_output_init, "np_output_init")
        if u_tensor.shape[0] != y_init_tensor.shape[0]:
            raise ValueError(
                f"Batch size mismatch: np_input has {u_tensor.shape[0]}, "
                f"np_output_init has {y_init_tensor.shape[0]}."
            )
    else:
        y_init_tensor = None

    if self._pred_cb:
        if y_init_tensor is None:
            raise ValueError("Model trained with prediction_concat requires 'np_output_init'.")
        if input_seq_len - self._pred_cb.t_offset <= 0:
            raise ValueError(f"Input seq len ({input_seq_len}) too short for offset ({self._pred_cb.t_offset}).")
        if self._pred_cb.t_offset > 0:
            u_tensor = u_tensor[:, self._pred_cb.t_offset :, :]
            y_init_tensor = y_init_tensor[:, : -self._pred_cb.t_offset, :]
        y_init_tensor = self._adjust_seq_len(y_init_tensor, input_seq_len, "y_init")
        final_input = torch.cat((u_tensor, y_init_tensor), dim=-1)
    elif y_init_tensor is not None and self._needs_concat(u_tensor):
        y_init_tensor = self._adjust_seq_len(y_init_tensor, input_seq_len, "y_init")
        final_input = torch.cat((u_tensor, y_init_tensor), dim=-1)
    else:
        final_input = u_tensor

    model_output = self.model(final_input)
    output_tensor = model_output[0] if isinstance(model_output, tuple) else model_output
    if not isinstance(output_tensor, torch.Tensor):
        raise RuntimeError(f"Model output is not a tensor. Type: {type(output_tensor)}")

    result = output_tensor.cpu().numpy()
    match input_ndim:
        case 1:
            if result.shape[-1] != 1:
                raise ValueError(
                    f"Cannot return 1D output: model produces {result.shape[-1]} features. "
                    f"Pass 2D input (seq_len, features) instead."
                )
            return result[0, :, 0]
        case 2:
            return result[0]
        case _:
            return result