Skip to content

Functional

Low-level functional interfaces for core operations. These functions operate directly on tensors without the NamedLinop abstraction, and are used internally by the linop classes.

torchlinops.functional

array_to_blocks

array_to_blocks(
    input,
    block_shape: tuple[int, ...],
    stride: Optional[tuple[int, ...]] = None,
    mask: Optional[Bool[Tensor, ...]] = None,
    out: Optional[Tensor] = None,
)

Wrapper for default arguments

Source code in src/torchlinops/functional/_unfold/array_to_blocks.py
def array_to_blocks(
    input,
    block_shape: tuple[int, ...],
    stride: Optional[tuple[int, ...]] = None,
    mask: Optional[Bool[Tensor, "..."]] = None,
    out: Optional[Tensor] = None,
):
    """Extract blocks from an array via the autograd-aware function.

    Thin wrapper around ``ArrayToBlocksFn.apply`` that exists to supply
    default arguments, which ``torch.autograd.Function`` does not support.
    """
    args = (input, block_shape, stride, mask, out)
    return ArrayToBlocksFn.apply(*args)

blocks_to_array

blocks_to_array(
    input,
    im_size: tuple,
    block_shape: tuple,
    stride: Optional[tuple] = None,
    mask: Optional[Bool[Tensor, ...]] = None,
    out: Optional[Tensor] = None,
)

Wrapper for default arguments

Source code in src/torchlinops/functional/_unfold/array_to_blocks.py
def blocks_to_array(
    input,
    im_size: tuple,
    block_shape: tuple,
    stride: Optional[tuple] = None,
    mask: Optional[Bool[Tensor, "..."]] = None,
    out: Optional[Tensor] = None,
):
    """Accumulate blocks back into a full array via the autograd-aware function.

    Thin wrapper around ``BlocksToArrayFn.apply`` that exists to supply
    default arguments, which ``torch.autograd.Function`` does not support.
    """
    args = (input, im_size, block_shape, stride, mask, out)
    return BlocksToArrayFn.apply(*args)

canonicalize_idx

canonicalize_idx(idx: Integer[Tensor, ...], dim: int = -1)
PARAMETER DESCRIPTION
idx

TYPE: [B1... D B2...]

dim

The dimension of idx to tuple-ify

TYPE: int DEFAULT: -1

RETURNS DESCRIPTION
D-tuple of [B1... B2...] tensors

Note: dim is usually 0 or -1

Source code in src/torchlinops/functional/_index/index.py
def canonicalize_idx(idx: Integer[Tensor, "..."], dim: int = -1):
    """Split a stacked index tensor into a tuple of per-axis index tensors.

    Parameters
    ----------
    idx : [B1... D B2...]
    dim : int
        The dimension of idx to tuple-ify

    Returns
    -------
    D-tuple of [B1... B2...] tensors

    Note: dim is usually 0 or -1

    """
    # unbind(dim) yields exactly one slice per position along `dim`,
    # equivalent to selecting each index in turn.
    return tuple(idx.unbind(dim))

center_crop

center_crop(
    x: Tensor,
    im_size: tuple[int, ...],
    crop_im_size: tuple[int, ...],
)

Center crops the input tensor to the specified size.

PARAMETER DESCRIPTION
x

The input tensor to be cropped.

TYPE: Tensor

im_size

The original size of the input tensor.

TYPE: tuple of int

crop_im_size

The desired size after cropping.

TYPE: tuple of int

RETURNS DESCRIPTION
Tensor

The cropped tensor.

Notes

This function uses the Pad operator to perform the cropping operation. It calculates the necessary cropping dimensions and applies the cropping from the center of the original tensor to achieve the specified crop_im_size.

Source code in src/torchlinops/functional/_pad.py
def center_crop(x: Tensor, im_size: tuple[int, ...], crop_im_size: tuple[int, ...]):
    """
    Center crops the input tensor to the specified size.

    Parameters
    ----------
    x : Tensor
        The input tensor to be cropped.
    im_size : tuple of int
        The original size of the input tensor.
    crop_im_size : tuple of int
        The desired size after cropping.

    Returns
    -------
    Tensor
        The cropped tensor.

    Notes
    -----
    Cropping is performed as the adjoint of zero-padding: a stand-in
    namespace mimicking a `Pad` linop is built for the crop geometry and
    `Pad.adj_fn` is applied to extract the center region.
    """
    padding = pad_to_size(crop_im_size, im_size)
    crop_stub = SimpleNamespace(
        im_size=crop_im_size,
        pad_im_size=im_size,
        D=len(im_size),
        pad=padding,
        crop_slice=crop_slice_from_pad(padding),
    )
    return Pad.adj_fn(crop_stub, x)

center_pad

center_pad(
    x: Tensor,
    im_size: tuple[int, ...],
    pad_im_size: tuple[int, ...],
)

Center pads the input tensor to the specified size.

PARAMETER DESCRIPTION
x

The input tensor to be padded.

TYPE: Tensor

im_size

The original size of the input tensor.

TYPE: tuple of int

pad_im_size

The desired size after padding.

TYPE: tuple of int

RETURNS DESCRIPTION
Tensor

The padded tensor.

Notes

This function uses the Pad operator to perform the padding operation. It calculates the necessary padding dimensions and applies the padding to ensure that the output tensor matches the pad_im_size while preserving the center of the original tensor.

Source code in src/torchlinops/functional/_pad.py
def center_pad(x: Tensor, im_size: tuple[int, ...], pad_im_size: tuple[int, ...]):
    """
    Center pads the input tensor to the specified size.

    Parameters
    ----------
    x : Tensor
        The input tensor to be padded.
    im_size : tuple of int
        The original size of the input tensor.
    pad_im_size : tuple of int
        The desired size after padding.

    Returns
    -------
    Tensor
        The padded tensor.

    Notes
    -----
    A stand-in namespace mimicking a `Pad` linop is constructed for the
    requested geometry and `Pad.fn` is applied, so the original tensor's
    center is preserved while the output matches `pad_im_size`.
    """
    padding = pad_to_size(im_size, pad_im_size)
    pad_stub = SimpleNamespace(
        im_size=im_size,
        pad_im_size=pad_im_size,
        D=len(im_size),
        pad=padding,
        crop_slice=crop_slice_from_pad(padding),
    )
    return Pad.fn(pad_stub, x)

ensure_tensor_indexing

ensure_tensor_indexing(
    idx: tuple[IndexOrSlice, ...], tshape: tuple | Size
) -> tuple[Tensor, ...]

Convert any slice()-type indexes to tensor indexes.

Also broadcasts by appending slice(None) to the front of idx

PARAMETER DESCRIPTION
idx

Tuple of torch.Tensor (integer-valued) index tensors or slice() objects

TYPE: tuple

tshape

Target size, should have length greater than or equal to that of idx

TYPE: Size or tuple

Source code in src/torchlinops/functional/_index/index.py
def ensure_tensor_indexing(
    idx: tuple[IndexOrSlice, ...], tshape: tuple | torch.Size
) -> tuple[Tensor, ...]:
    """Convert any slice()-type indexes to tensor indexes.

    Also broadcasts by prepending slice(None) entries to idx until its
    length matches that of tshape.

    Parameters
    ----------
    idx : tuple
        Tuple of torch.Tensor (integer-valued) index tensors or slice() objects
    tshape : torch.Size or tuple
        Target size, should have length greater than or equal to that of idx

    """
    pending = list(idx)
    if len(tshape) < len(pending):
        raise ValueError(f"Cannot broadcast idx {pending} to tshape {tshape}")
    # Left-pad with full slices so index length matches the target shape.
    pending = [slice(None)] * (len(tshape) - len(pending)) + pending

    converted = []
    for d, (size, entry) in enumerate(zip(tshape, pending)):
        if isinstance(entry, Tensor):
            converted.append(entry)
        elif isinstance(entry, slice):
            rng = torch.tensor(slice2range(entry, size))
            # Trailing unsqueezes let the range broadcast against later dims.
            converted.append(_unsqueeze_last(rng, len(tshape) - d - 1))
        else:
            raise ValueError(
                f"idx must contain only tensors or slice() objects but got {entry}"
            )
    return tuple(converted)

fold

fold(
    x,
    im_size: tuple,
    block_size: tuple,
    stride: tuple,
    mask: Optional[Bool[Tensor, ...]] = None,
    output: Optional[Tensor] = None,
) -> Tensor

Accumulate an array of blocks into a full array

PARAMETER DESCRIPTION
x

Shape [B..., blocks, block_size]

TYPE: Tensor

RETURNS DESCRIPTION
Tensor

If mask is not None, block_size will be an int equal to the number of True elements in the mask Otherwise it will be the full block shape.

TYPE: Shape [B..., *im_size]

Source code in src/torchlinops/functional/_unfold/fold.py
def fold(
    x,
    im_size: tuple,
    block_size: tuple,
    stride: tuple,
    mask: Optional[Bool[Tensor, "..."]] = None,
    output: Optional[Tensor] = None,
) -> Tensor:
    """Accumulate an array of blocks into a full array

    Parameters
    ----------
    x : Tensor
        Shape [B..., blocks, block_size]

    Returns
    -------
    Tensor: Shape [B..., *im_size]
        If mask is not None, block_size will be an int equal to the number of
        True elements in the mask; otherwise it will be the full block shape.
    """
    x_flat, shapes, is_complex = _prep_fold(x, im_size, block_size, stride, mask)

    if is_complex:
        # Treat complex data as real with the (re, im) pair folded into the
        # last dimension, since the core kernel operates on real tensors.
        x_flat = torch.flatten(torch.view_as_real(x_flat), -2, -1)
    folded = _fold(x_flat, output=output, **shapes)
    result = folded.reshape(*shapes["batch_shape"], *shapes["im_size"])
    if is_complex:
        # Re-pair real/imag on the last axis and view back as complex.
        half = result.shape[-1] // 2
        result = torch.view_as_complex(result.reshape(*result.shape[:-1], half, 2))
    return result

get_nblocks

get_nblocks(
    im_size: tuple[int, ...],
    block_size: tuple[int, ...],
    block_stride: Optional[tuple[int, ...]] = None,
) -> tuple[int, ...]

Given an image and a block size, returns the number of valid blocks in each direction.

Blocks may overlap

Examples:

>>> get_nblocks((5, 5), (3, 3), (1, 1))
(3, 3)
>>> get_nblocks((5, 5), (3, 3), (2, 2))
(2, 2)
>>> get_nblocks((6, 6), (3, 3), (2, 2))
(2, 2)
>>> get_nblocks((7, 7), (3, 3), (2, 2))
(3, 3)
>>> get_nblocks((10, 10), (8, 8), (4, 4))
(1, 1)
Source code in src/torchlinops/functional/_unfold/nblocks.py
def get_nblocks(
    im_size: tuple[int, ...],
    block_size: tuple[int, ...],
    block_stride: Optional[tuple[int, ...]] = None,
) -> tuple[int, ...]:
    """Given an image and a block size, returns the number of valid blocks in each direction.

    Blocks may overlap.

    Parameters
    ----------
    im_size : tuple of int
        Size of the full array in each dimension.
    block_size : tuple of int
        Size of each block; must have the same length as ``im_size``.
    block_stride : tuple of int, optional
        Step between consecutive blocks in each dimension. Defaults to a
        stride of 1 in every dimension.

    Returns
    -------
    tuple of int
        Number of fully-contained block positions per dimension.

    Raises
    ------
    ValueError
        If ``im_size`` and ``block_size`` have different lengths.

    Examples
    --------
    >>> get_nblocks((5, 5), (3, 3), (1, 1))
    (3, 3)
    >>> get_nblocks((5, 5), (3, 3), (2, 2))
    (2, 2)
    >>> get_nblocks((6, 6), (3, 3), (2, 2))
    (2, 2)
    >>> get_nblocks((7, 7), (3, 3), (2, 2))
    (3, 3)
    >>> get_nblocks((10, 10), (8, 8), (4, 4))
    (1, 1)
    """
    # Raise a real exception instead of using `assert`, which is stripped
    # when Python runs with optimizations (-O) and would skip validation.
    if len(im_size) != len(block_size):
        raise ValueError(f"im_size {im_size} and block_size {block_size} don't match")
    block_stride = block_stride if block_stride is not None else (1,) * len(block_size)
    # Count positions where a full block fits: (extent - block) // stride + 1.
    return tuple(
        (im - bl) // st + 1 for im, bl, st in zip(im_size, block_size, block_stride)
    )

grid

grid(
    vals: Inexact[Tensor, ...],
    locs: Float[Tensor, "... D"],
    grid_size: tuple[int, ...],
    width: float | tuple[float, ...],
    kernel: str = "kaiser_bessel",
    norm: int = 1,
    pad_mode: Literal["zero", "circular"] = "circular",
    kernel_params: dict = None,
)

Interpolate from off-grid values to on-grid locations.

grid_size : tuple[int, ...] Shape of output array, excluding batch dimensions For example, if the gridded output should have shape [3, 64, 64], where 3 is the batch size, then grid_size would be [64, 64]

norm: int, 1 or 2. If 2, uses the Euclidean norm to grid points to compute weights; if 1, computes weights as the product of axis-aligned norm weights (same as sigpy).

Source code in src/torchlinops/functional/_interp/grid.py
def grid(
    vals: Inexact[Tensor, "..."],
    locs: Float[Tensor, "... D"],
    grid_size: tuple[int, ...],
    width: float | tuple[float, ...],
    kernel: str = "kaiser_bessel",
    norm: int = 1,
    pad_mode: Literal["zero", "circular"] = "circular",
    kernel_params: dict = None,
):
    """Interpolate from off-grid values to on-grid locations.

    grid_size : tuple[int, ...]
        Shape of the output array, excluding batch dimensions.
        For example, if the gridded output should have shape [3, 64, 64],
        where 3 is the batch size, then grid_size would be [64, 64].

    norm : int, 1 or 2
        If 2, uses the Euclidean norm to grid points to compute weights;
        if 1, computes weights as the product of axis-aligned norm weights
        (same as sigpy).
    """
    if kernel_params is None:
        kernel_params = {}
    vals_flat, locs, shapes = prep_grid_shapes(vals, locs, grid_size, width)
    kernel_params = _apply_default_kernel_params(kernel, kernel_params)
    gridded_flat = _grid(
        vals_flat,
        locs,
        kernel=kernel,
        norm=norm,
        pad_mode=pad_mode,
        kernel_params=kernel_params,
        **shapes,
    )
    # Restore batch dimensions in front of the spatial grid dimensions.
    return gridded_flat.reshape(*shapes["batch_shape"], *grid_size)

index

index(
    vals: Shaped[Tensor, ...], idx: tuple[IndexOrSlice, ...]
) -> Tensor
PARAMETER DESCRIPTION
idx

Index

TYPE: tuple of Tensor or Slice objects

Source code in src/torchlinops/functional/_index/index.py
def index(
    vals: Shaped[Tensor, "..."],
    idx: tuple[IndexOrSlice, ...],
) -> Tensor:
    """Index the trailing dimensions of ``vals``, broadcasting over any
    leading (batch) dimensions.

    Parameters
    ----------
    idx : tuple of Tensor or Slice objects
        Index
    """
    n_batch = len(vals.shape) - len(idx)
    if n_batch < 0:
        raise ValueError(
            f"Input value with shape {vals.shape} cannot be indexed with index tensors of length {len(idx)}"
        )
    # Full slices over the leading dims leave batch axes untouched.
    full_index = (slice(None),) * n_batch + tuple(idx)
    return vals[full_index]

index_adjoint

index_adjoint(
    vals: Shaped[Tensor, ...],
    idx: tuple[Integer[Tensor, ...], ...],
    grid_size: tuple[int, ...],
) -> Tensor
PARAMETER DESCRIPTION
vals

Batch size of vals is used to determine batch size of output

TYPE: Shaped[Tensor, ...]

idx

Use ensure_tensor_indexing to guarantee this

TYPE: tuple of integer-valued tensors

grid_size

The shape of the output tensor, excluding batch dimensions

TYPE: tuple of ints

Source code in src/torchlinops/functional/_index/index.py
def index_adjoint(
    vals: Shaped[Tensor, "..."],
    idx: tuple[Integer[Tensor, "..."], ...],
    grid_size: tuple[int, ...],
) -> Tensor:
    """Scatter ``vals`` onto a grid at the given indices (adjoint of ``index``).

    Parameters
    ----------
    vals :
        Batch size of vals is used to determine batch size of output
    idx : tuple of integer-valued tensors
        Use ensure_tensor_indexing to guarantee this
    grid_size : tuple of ints
        The shape of the output tensor, excluding batch dimensions
    """
    # Validate each index tensor against its grid extent before scattering;
    # negative indices down to -dim_size are allowed (Python-style wrap).
    for d, (dim_idx, dim_size) in enumerate(zip(idx, grid_size)):
        too_high = (dim_idx >= dim_size).any()
        too_low = (dim_idx < -dim_size).any()
        if too_high or too_low:
            raise IndexError(
                f"Out-of-bounds index for grid of shape {grid_size}: idx[{d}]"
            )

    return multi_grid(vals, torch.stack(idx, dim=0), grid_size)

interpolate

interpolate(
    vals: Inexact[Tensor, ...],
    locs: Float[Tensor, "... D"],
    width: float | tuple[float, ...],
    kernel="kaiser_bessel",
    norm: int = 1,
    pad_mode: str = "circular",
    kernel_params: dict = None,
)

Interpolate from a regular grid to scattered locations (ungridding).

Evaluates values on a uniform grid at arbitrary non-uniform locations using kernel-based interpolation. This is the forward NUFFT interpolation step. Gradients are computed via the adjoint (gridding) operation.

PARAMETER DESCRIPTION
vals

Values on a regular grid. The last D dimensions are spatial, where D = locs.shape[-1].

TYPE: Inexact[Tensor, ...]

locs

Non-uniform target locations, with coordinates in the range [0, N-1] for each spatial dimension of size N.

TYPE: Float[Tensor, '... D']

width

Interpolation kernel width (in grid units) for each spatial dimension. A scalar applies the same width to all dimensions.

TYPE: float or tuple of float

kernel

Interpolation kernel type. Default is 'kaiser_bessel'.

TYPE: str DEFAULT: 'kaiser_bessel'

norm

Kernel normalization order. Default is 1.

TYPE: int DEFAULT: 1

pad_mode

Padding mode for out-of-bounds access. Default is 'circular'.

TYPE: str DEFAULT: 'circular'

kernel_params

Additional parameters passed to the interpolation kernel (e.g., {'beta': ...} for Kaiser-Bessel).

TYPE: dict DEFAULT: None

RETURNS DESCRIPTION
Tensor

Interpolated values at the non-uniform locations.

Source code in src/torchlinops/functional/_interp/interp.py
def interpolate(
    vals: Inexact[Tensor, "..."],
    locs: Float[Tensor, "... D"],
    width: float | tuple[float, ...],
    kernel="kaiser_bessel",
    norm: int = 1,
    pad_mode: str = "circular",
    kernel_params: dict = None,
):
    """Interpolate from a regular grid to scattered locations (ungridding).

    Evaluates grid values at arbitrary non-uniform locations using
    kernel-based interpolation — the forward NUFFT interpolation step.
    Gradients flow through the adjoint (gridding) operation.

    Parameters
    ----------
    vals : Inexact[Tensor, "..."]
        Values on a regular grid; the last ``D`` dimensions are spatial,
        with ``D = locs.shape[-1]``.
    locs : Float[Tensor, "... D"]
        Non-uniform target locations with coordinates in ``[0, N-1]`` for
        each spatial dimension of size ``N``.
    width : float or tuple of float
        Kernel width (in grid units) per spatial dimension; a scalar is
        applied to all dimensions.
    kernel : str, optional
        Interpolation kernel type. Default is ``'kaiser_bessel'``.
    norm : int, optional
        Kernel normalization order. Default is 1.
    pad_mode : str, optional
        Padding mode for out-of-bounds access. Default is ``'circular'``.
    kernel_params : dict, optional
        Extra kernel parameters (e.g. ``{'beta': ...}`` for Kaiser-Bessel).

    Returns
    -------
    Tensor
        Interpolated values at the non-uniform locations.
    """
    # Delegate to the autograd Function, which defines the backward pass.
    return InterpolateFn.apply(
        vals, locs, width, kernel, norm, pad_mode, kernel_params
    )

interpolate_adjoint

interpolate_adjoint(
    vals: Inexact[Tensor, ...],
    locs: Float[Tensor, "... D"],
    grid_size: tuple[int, ...],
    width: float | tuple[float, ...],
    kernel: str = "kaiser_bessel",
    norm: int = 1,
    pad_mode: str = "circular",
    kernel_params: dict = None,
)

Adjoint of interpolation (gridding) from scattered locations to a regular grid.

Scatters values from non-uniform locations back onto a regular grid using kernel-based gridding. This is the adjoint of the interpolate operation and corresponds to the gridding step in an adjoint NUFFT.

PARAMETER DESCRIPTION
vals

Values at non-uniform locations to be gridded.

TYPE: Inexact[Tensor, ...]

locs

Non-uniform source locations, with coordinates in the range [0, N-1] for each spatial dimension of size N.

TYPE: Float[Tensor, '... D']

grid_size

Output grid size for each spatial dimension.

TYPE: tuple of int

width

Interpolation kernel width (in grid units) for each spatial dimension. A scalar applies the same width to all dimensions.

TYPE: float or tuple of float

kernel

Interpolation kernel type. Default is 'kaiser_bessel'.

TYPE: str DEFAULT: 'kaiser_bessel'

norm

Kernel normalization order. Default is 1.

TYPE: int DEFAULT: 1

pad_mode

Padding mode for out-of-bounds access. Default is 'circular'.

TYPE: str DEFAULT: 'circular'

kernel_params

Additional parameters passed to the interpolation kernel (e.g., {'beta': ...} for Kaiser-Bessel).

TYPE: dict DEFAULT: None

RETURNS DESCRIPTION
Tensor

Gridded values on a regular grid of shape (..., *grid_size).

Source code in src/torchlinops/functional/_interp/interp.py
def interpolate_adjoint(
    vals: Inexact[Tensor, "..."],
    locs: Float[Tensor, "... D"],
    grid_size: tuple[int, ...],
    width: float | tuple[float, ...],
    kernel: str = "kaiser_bessel",
    norm: int = 1,
    pad_mode: str = "circular",
    kernel_params: dict = None,
):
    """Adjoint of interpolation (gridding) from scattered locations to a grid.

    Scatters values from non-uniform locations back onto a regular grid
    using kernel-based gridding — the adjoint of ``interpolate`` and the
    gridding step of an adjoint NUFFT.

    Parameters
    ----------
    vals : Inexact[Tensor, "..."]
        Values at non-uniform locations to be gridded.
    locs : Float[Tensor, "... D"]
        Non-uniform source locations with coordinates in ``[0, N-1]`` for
        each spatial dimension of size ``N``.
    grid_size : tuple of int
        Output grid size for each spatial dimension.
    width : float or tuple of float
        Kernel width (in grid units) per spatial dimension; a scalar is
        applied to all dimensions.
    kernel : str, optional
        Interpolation kernel type. Default is ``'kaiser_bessel'``.
    norm : int, optional
        Kernel normalization order. Default is 1.
    pad_mode : str, optional
        Padding mode for out-of-bounds access. Default is ``'circular'``.
    kernel_params : dict, optional
        Extra kernel parameters (e.g. ``{'beta': ...}`` for Kaiser-Bessel).

    Returns
    -------
    Tensor
        Gridded values on a regular grid of shape ``(..., *grid_size)``.
    """
    # Delegate to the autograd Function, which defines the backward pass.
    args = (vals, locs, grid_size, width, kernel, norm, pad_mode, kernel_params)
    return InterpolateAdjointFn.apply(*args)

mask2idx

mask2idx(
    mask: Bool[Tensor, ...],
) -> tuple[Integer[Tensor, ...], ...]

Converts an n-dimensional boolean tensor into an n-tuple of integer tensors indexing the True elements of the tensor.

PARAMETER DESCRIPTION
mask

A boolean tensor.

TYPE: Tensor

RETURNS DESCRIPTION
tuple[torch.Tensor]:

A tuple of integer tensors indexing the True elements.

Source code in src/torchlinops/functional/_index/index.py
def mask2idx(mask: Bool[Tensor, "..."]) -> tuple[Integer[Tensor, "..."], ...]:
    """Converts an n-dimensional boolean tensor into an n-tuple of integer tensors
    indexing the True elements of the tensor.

    Parameters
    ----------
    mask : torch.Tensor
        A boolean tensor.

    Returns
    -------
    tuple[torch.Tensor]:
        A tuple of integer tensors indexing the True elements.
    """
    if not mask.dtype == torch.bool:
        raise ValueError(f"Input tensor must be of boolean dtype, but got {mask.dtype}")
    return torch.nonzero(mask, as_tuple=True)

nufft

nufft(
    x: Tensor,
    locs: Float[Tensor, "... D"],
    oversamp: float = 1.25,
    width: float = 4.0,
)

Functional interface for the Non-Uniform Fast Fourier Transform.

Computes the forward NUFFT of input data at specified non-uniform locations. Internally applies apodization, zero-padding, FFT, and interpolation.

PARAMETER DESCRIPTION
x

Input data on a regular grid. The last D dimensions are treated as spatial dimensions, where D = locs.shape[-1].

TYPE: Tensor

locs

Non-uniform sample locations. Each entry along the last dimension corresponds to a spatial axis and should lie in [-N//2, N//2] where N is the grid size along that axis.

TYPE: Float[Tensor, '... D']

oversamp

Oversampling factor for the padded FFT grid. Default is 1.25.

TYPE: float DEFAULT: 1.25

width

Interpolation kernel width. Default is 4.0.

TYPE: float DEFAULT: 4.0

RETURNS DESCRIPTION
Tensor

NUFFT values evaluated at the non-uniform locations.

Source code in src/torchlinops/functional/_nufft.py
def nufft(
    x: Tensor,
    locs: Float[Tensor, "... D"],
    oversamp: float = 1.25,
    width: float = 4.0,
):
    """Functional interface for the Non-Uniform Fast Fourier Transform.

    Computes the forward NUFFT of gridded data at the requested non-uniform
    locations by applying apodization, zero-padding, an FFT, and
    kernel interpolation in sequence.

    Parameters
    ----------
    x : Tensor
        Input data on a regular grid. The last ``D`` dimensions are
        treated as spatial dimensions, where ``D = locs.shape[-1]``.
    locs : Float[Tensor, "... D"]
        Non-uniform sample locations. Each entry along the last dimension
        corresponds to a spatial axis and should lie in ``[-N//2, N//2]``
        where ``N`` is the grid size along that axis.
    oversamp : float, optional
        Oversampling factor for the padded FFT grid. Default is 1.25.
    width : float, optional
        Interpolation kernel width. Default is 4.0.

    Returns
    -------
    Tensor
        NUFFT values evaluated at the non-uniform locations.
    """
    ndim = locs.shape[-1]
    grid_size = x.shape[-ndim:]
    params = init_nufft(grid_size, locs, oversamp, width, x.device)

    # Pre-compensate for the interpolation kernel's rolloff, then move to
    # the oversampled frequency grid.
    apodized = x * params.apodize
    padded = Pad.fn(params.pad_ns, apodized)
    spectrum = cfftn(padded, dim=params.dim, norm="ortho")
    samples = interpolate(
        spectrum,
        params.locs,
        width,
        kernel="kaiser_bessel",
        kernel_params=dict(beta=params.beta),
    )
    return samples / params.scale_factor

nufft_adjoint

nufft_adjoint(
    x: Tensor,
    locs: Float[Tensor, "... D"],
    grid_size: tuple[int, ...],
    oversamp: float = 1.25,
    width: float = 4.0,
)

Functional interface for the adjoint NUFFT.

Grids non-uniformly sampled data back onto a regular grid. Internally applies adjoint interpolation (gridding), inverse FFT, cropping, and apodization correction.

PARAMETER DESCRIPTION
x

Non-uniformly sampled data to be gridded.

TYPE: Tensor

locs

Non-uniform sample locations. Each entry along the last dimension corresponds to a spatial axis and should lie in [-N//2, N//2] where N is the grid size along that axis.

TYPE: Float[Tensor, '... D']

grid_size

Desired output grid size for each spatial dimension.

TYPE: tuple of int

oversamp

Oversampling factor for the padded FFT grid. Default is 1.25.

TYPE: float DEFAULT: 1.25

width

Interpolation kernel width. Default is 4.0.

TYPE: float DEFAULT: 4.0

RETURNS DESCRIPTION
Tensor

Gridded data on a regular grid of shape (..., *grid_size).

Source code in src/torchlinops/functional/_nufft.py
def nufft_adjoint(
    x: Tensor,
    locs: Float[Tensor, "... D"],
    grid_size: tuple[int, ...],
    oversamp: float = 1.25,
    width: float = 4.0,
):
    """Functional interface for the adjoint NUFFT.

    Grids non-uniformly sampled data back onto a regular grid by applying
    adjoint interpolation (gridding), an inverse FFT, cropping, and
    apodization correction in sequence.

    Parameters
    ----------
    x : Tensor
        Non-uniformly sampled data to be gridded.
    locs : Float[Tensor, "... D"]
        Non-uniform sample locations. Each entry along the last dimension
        corresponds to a spatial axis and should lie in ``[-N//2, N//2]``
        where ``N`` is the grid size along that axis.
    grid_size : tuple of int
        Desired output grid size for each spatial dimension.
    oversamp : float, optional
        Oversampling factor for the padded FFT grid. Default is 1.25.
    width : float, optional
        Interpolation kernel width. Default is 4.0.

    Returns
    -------
    Tensor
        Gridded data on a regular grid of shape ``(..., *grid_size)``.
    """
    params = init_nufft(grid_size, locs, oversamp, width, x.device)

    scaled = x / params.scale_factor
    gridded = interpolate_adjoint(
        scaled,
        params.locs,
        params.padded_size,
        width,
        kernel="kaiser_bessel",
        kernel_params=dict(beta=params.beta),
    )
    image = cifftn(gridded, dim=params.dim, norm="ortho")
    # Crop back from the oversampled grid, then undo the kernel rolloff.
    cropped = Pad.adj_fn(params.pad_ns, image)
    return cropped * params.apodize

slice2range

slice2range(slice_obj: slice, n: int)

Convert a slice object to a range object given the array size

Examples:

>>> tuple(slice2range(slice(None, None, None), 4))
(0, 1, 2, 3)
>>> tuple(slice2range(slice(None, None, -1), 3))
(2, 1, 0)
Source code in src/torchlinops/functional/_index/index.py
def slice2range(slice_obj: slice, n: int):
    """Convert a slice object to a range object given the array size

    Examples
    --------
    >>> tuple(slice2range(slice(None, None, None), 4))
    (0, 1, 2, 3)
    >>> tuple(slice2range(slice(None, None, -1), 3))
    (2, 1, 0)

    """
    # A missing step means forward iteration with step 1.
    step = 1 if slice_obj.step is None else slice_obj.step
    forward = slice_obj.step is None or slice_obj.step > 0
    if slice_obj.start is not None:
        start = slice_obj.start
    else:
        # Default start: first element going forward, last going backward.
        start = 0 if forward else n - 1
    if slice_obj.stop is not None:
        stop = slice_obj.stop
    else:
        # Default stop: one past the end forward, one before 0 backward.
        stop = n if forward else -1
    return range(start, stop, step)

unfold

unfold(
    x: Shaped[Tensor, ...],
    block_size: tuple,
    stride: Optional[tuple] = None,
    mask: Optional[Bool[Tensor, ...]] = None,
    output: Optional[Tensor] = None,
) -> Tensor

Wrapper that dispatches complex and real tensors. Also precomputes some shapes.

PARAMETER DESCRIPTION
x

Shape [B..., *im_size]

TYPE: Tensor

RETURNS DESCRIPTION
Tensor

If mask is not None, block_size will be an int equal to the number of True elements in the mask Otherwise it will be the full block shape.

TYPE: Shape [B..., *blocks, *block_size]

Source code in src/torchlinops/functional/_unfold/unfold.py
def unfold(
    x: Shaped[Tensor, "..."],
    block_size: tuple,
    stride: Optional[tuple] = None,
    mask: Optional[Bool[Tensor, "..."]] = None,
    output: Optional[Tensor] = None,
) -> Tensor:
    """Wrapper that dispatches complex and real tensors.
    Also precomputes some shapes.

    Parameters
    ----------
    x : Tensor
        Shape [B..., *im_size]

    Returns
    -------
    Tensor: Shape [B..., *blocks, *block_size]
        If mask is not None, block_size will be an int equal to the number of
        True elements in the mask; otherwise it will be the full block shape.
    """
    x_flat, shapes, is_complex = _prep_unfold(x, block_size, stride, mask)
    if is_complex:
        # The core kernel handles real tensors only: fold the (re, im) pair
        # into the last dimension.
        x_flat = torch.flatten(torch.view_as_real(x_flat), -2, -1)
    blocks_flat = _unfold(x_flat, output=output, **shapes)
    blocks = blocks_flat.reshape(
        *shapes["batch_shape"],
        *shapes["nblocks"],
        *shapes["block_size"],
    )
    if is_complex:
        # Re-pair real/imag and view the result back as complex.
        half = blocks.shape[-1] // 2
        blocks = torch.view_as_complex(blocks.reshape(*blocks.shape[:-1], half, 2))
    if mask is not None:
        # Keep only the in-mask elements of each block.
        blocks = blocks[..., mask]
    return blocks

ungrid

ungrid(
    vals: Inexact[Tensor, ...],
    locs: Float[Tensor, "... D"],
    width: float | tuple[float, ...],
    kernel: str = "kaiser_bessel",
    norm: int = 1,
    pad_mode: Literal["zero", "circular"] = "circular",
    kernel_params: dict = None,
)

Interpolate from on-grid values to off-grid locations.

norm: int, 1 or 2. If 1, computes weights as the product of axis-aligned norm weights (same as sigpy); if 2, uses the Euclidean norm to grid points to compute weights.

Source code in src/torchlinops/functional/_interp/ungrid.py
def ungrid(
    vals: Inexact[Tensor, "..."],
    locs: Float[Tensor, "... D"],
    width: float | tuple[float, ...],
    kernel: str = "kaiser_bessel",
    norm: int = 1,
    pad_mode: Literal["zero", "circular"] = "circular",
    kernel_params: dict = None,
):
    """Interpolate from on-grid values to off-grid locations.

    norm : int, 1 or 2
        If 1, computes weights as the product of axis-aligned norm weights
        (same as sigpy); if 2, uses the Euclidean norm to grid points to
        compute weights.
    """
    if kernel_params is None:
        kernel_params = {}
    vals_flat, locs, shapes = prep_ungrid_shapes(vals, locs, width)
    kernel_params = _apply_default_kernel_params(kernel, kernel_params)
    ungridded_flat = _ungrid(
        vals_flat,
        locs,
        kernel=kernel,
        norm=norm,
        pad_mode=pad_mode,
        kernel_params=kernel_params,
        **shapes,
    )
    # Restore batch dimensions followed by the locations' batch shape.
    return ungridded_flat.reshape(*shapes["batch_shape"], *shapes["locs_batch_shape"])