Skip to content

ArrayToBlocks/BlocksToArray

torchlinops.linops.ArrayToBlocks

Bases: NamedLinop

Extract sliding windows from an array.

Adjoint of BlocksToArray.

Source code in src/torchlinops/linops/array_to_blocks.py
class ArrayToBlocks(NamedLinop):
    """Linear operator that slices sliding-window blocks out of an array.

    Adjoint of [BlocksToArray](#BlocksToArray).

    """

    def __init__(
        self,
        grid_size: tuple[int, ...],
        block_size: tuple[int, ...],
        stride: tuple[int, ...],
        mask: Optional[Tensor] = None,
        batch_shape: Optional[Shape] = None,
        array_shape: Optional[Shape] = None,
        blocks_shape: Optional[Shape] = None,
    ):
        """
        Parameters
        ----------
        grid_size : tuple[int, ...]
            Spatial size of the input array.
        block_size : tuple[int, ...]
            Size of each extracted sliding-window block.
        stride : tuple[int, ...]
            Step between the starts of consecutive blocks.
        mask : Tensor, optional
            Boolean mask selecting a subset of blocks.
        batch_shape : Shape, optional
            Named shape for the leading batch dimensions.
        array_shape : Shape, optional
            Named shape for the input array dimensions.
        blocks_shape : Shape, optional
            Named shape for the output block dimensions.
        """
        self.grid_size = grid_size
        self.ndim = len(self.grid_size)
        self.block_size = block_size
        self.stride = stride

        # Unspecified named shapes fall back to the wildcard ("...",).
        self.batch_shape = default_to(("...",), batch_shape)
        self.array_shape = default_to(("...",), array_shape)
        self.blocks_shape = default_to(("...",), blocks_shape)
        full_shape = NS(self.batch_shape) + NS(self.array_shape, self.blocks_shape)
        super().__init__(full_shape)

        # Register the mask as a frozen parameter (after super().__init__)
        # so it travels with the module's device/dtype moves.
        self.mask = (
            nn.Parameter(mask, requires_grad=False) if mask is not None else mask
        )

    @staticmethod
    def fn(linop, x, /):
        """Forward: extract blocks from ``x``."""
        return F.array_to_blocks(x, linop.block_size, linop.stride, linop.mask)

    @staticmethod
    def adj_fn(linop, x, /):
        """Adjoint: accumulate blocks back onto the full grid."""
        return F.blocks_to_array(
            x, linop.grid_size, linop.block_size, linop.stride, linop.mask
        )

    @staticmethod
    def normal_fn(linop, x, /):
        """Normal operator: forward followed by adjoint."""
        return linop.adj_fn(linop, linop.fn(linop, x))

    def split_forward(self, ibatch, obatch):
        # Batch splitting does not alter any operator parameters.
        return copy(self)

    def adjoint(self):
        # The adjoint swaps the roles of the array and blocks shapes.
        return BlocksToArray(
            self.grid_size,
            self.block_size,
            self.stride,
            self.mask,
            self.batch_shape,
            self.blocks_shape,
            self.array_shape,
        )

    def size(self, dim):
        """Return the size of a named trailing input dimension, or None."""
        ndim = len(self.grid_size)
        if dim not in self.ishape[-ndim:]:
            return None
        # Negative offset into grid_size, aligned with the trailing dims.
        return self.grid_size[self.ishape.index(dim) - len(self.ishape)]

__init__

__init__(
    grid_size: tuple[int, ...],
    block_size: tuple[int, ...],
    stride: tuple[int, ...],
    mask: Optional[Tensor] = None,
    batch_shape: Optional[Shape] = None,
    array_shape: Optional[Shape] = None,
    blocks_shape: Optional[Shape] = None,
)
PARAMETER DESCRIPTION
grid_size

Size of the input array spatial dimensions.

TYPE: tuple[int, ...]

block_size

Size of each extracted block.

TYPE: tuple[int, ...]

stride

Stride between consecutive blocks.

TYPE: tuple[int, ...]

mask

Boolean mask selecting a subset of blocks.

TYPE: Tensor DEFAULT: None

batch_shape

Named shape for batch dimensions.

TYPE: Shape DEFAULT: None

array_shape

Named shape for the input array dimensions.

TYPE: Shape DEFAULT: None

blocks_shape

Named shape for the output block dimensions.

TYPE: Shape DEFAULT: None

Source code in src/torchlinops/linops/array_to_blocks.py
def __init__(
    self,
    grid_size: tuple[int, ...],
    block_size: tuple[int, ...],
    stride: tuple[int, ...],
    mask: Optional[Tensor] = None,
    batch_shape: Optional[Shape] = None,
    array_shape: Optional[Shape] = None,
    blocks_shape: Optional[Shape] = None,
):
    """
    Parameters
    ----------
    grid_size : tuple[int, ...]
        Size of the input array spatial dimensions.
    block_size : tuple[int, ...]
        Size of each extracted block.
    stride : tuple[int, ...]
        Stride between consecutive blocks.
    mask : Tensor, optional
        Boolean mask selecting a subset of blocks.
    batch_shape : Shape, optional
        Named shape for batch dimensions.
    array_shape : Shape, optional
        Named shape for the input array dimensions.
    blocks_shape : Shape, optional
        Named shape for the output block dimensions.
    """
    self.grid_size = grid_size
    # Number of spatial dimensions, derived from grid_size.
    self.ndim = len(self.grid_size)
    self.block_size = block_size
    self.stride = stride

    # Unspecified named shapes default to the wildcard ("...",).
    self.batch_shape = default_to(("...",), batch_shape)
    self.array_shape = default_to(("...",), array_shape)
    self.blocks_shape = default_to(("...",), blocks_shape)
    # Full operator shape: batch dims, then array (input) -> blocks (output).
    shape = NS(self.batch_shape) + NS(self.array_shape, self.blocks_shape)
    super().__init__(shape)

    if mask is not None:
        # Frozen parameter so the mask follows the module across devices.
        self.mask = nn.Parameter(mask, requires_grad=False)
    else:
        self.mask = mask

torchlinops.linops.BlocksToArray

Bases: NamedLinop

Compose several equally-sized blocks into a larger array.

Adjoint of ArrayToBlocks.

Source code in src/torchlinops/linops/array_to_blocks.py
class BlocksToArray(NamedLinop):
    """Compose several equally-sized blocks into a larger array.

    Adjoint of [ArrayToBlocks](#ArrayToBlocks).
    """

    def __init__(
        self,
        grid_size: tuple[int, ...],
        block_size: tuple[int, ...],
        stride: tuple[int, ...],
        mask: Optional[Tensor] = None,
        # Annotations parameterized as Optional[Shape] for consistency with
        # ArrayToBlocks (bare `Optional` carried no type information).
        batch_shape: Optional[Shape] = None,
        blocks_shape: Optional[Shape] = None,
        array_shape: Optional[Shape] = None,
    ):
        """
        Parameters
        ----------
        grid_size : tuple[int, ...]
            Size of the output array spatial dimensions.
        block_size : tuple[int, ...]
            Size of each block.
        stride : tuple[int, ...]
            Stride between consecutive blocks.
        mask : Tensor, optional
            Boolean mask selecting a subset of blocks.
        batch_shape : Shape, optional
            Named shape for batch dimensions.
        blocks_shape : Shape, optional
            Named shape for the input block dimensions.
        array_shape : Shape, optional
            Named shape for the output array dimensions.
        """
        self.grid_size = grid_size
        # Number of spatial dimensions, derived from grid_size.
        self.ndim = len(self.grid_size)
        self.block_size = block_size
        self.stride = stride

        # Unspecified named shapes default to the wildcard ("...",).
        self.batch_shape = default_to(("...",), batch_shape)
        self.blocks_shape = default_to(("...",), blocks_shape)
        self.array_shape = default_to(("...",), array_shape)
        # Full operator shape: batch dims, then blocks (input) -> array (output).
        shape = NS(self.batch_shape) + NS(self.blocks_shape, self.array_shape)
        super().__init__(shape)
        if mask is not None:
            # Frozen parameter so the mask follows the module across devices.
            self.mask = nn.Parameter(mask, requires_grad=False)
        else:
            self.mask = mask

    @staticmethod
    def fn(blockstoarray, x, /):
        """Forward: accumulate the blocks in ``x`` onto the full grid."""
        return F.blocks_to_array(
            x,
            blockstoarray.grid_size,
            blockstoarray.block_size,
            blockstoarray.stride,
            blockstoarray.mask,
        )

    @staticmethod
    def adj_fn(blockstoarray, x, /):
        """Adjoint: re-extract blocks from a full-size array."""
        # The adjoint is only defined on arrays covering the whole grid;
        # reject partially-sized inputs with a clear error.
        if x.shape[-blockstoarray.ndim :] != blockstoarray.grid_size:
            raise RuntimeError(
                f"BlocksToArray expected input with full size {blockstoarray.grid_size} but got {x.shape}"
            )
        return F.array_to_blocks(
            x,
            blockstoarray.block_size,
            blockstoarray.stride,
            blockstoarray.mask,
        )

    def split_forward(self, ibatch, obatch):
        # Batch splitting does not alter any operator parameters.
        return copy(self)

    def adjoint(self):
        # The adjoint swaps the roles of the blocks and array shapes.
        return ArrayToBlocks(
            self.grid_size,
            self.block_size,
            self.stride,
            self.mask,
            self.batch_shape,
            self.array_shape,
            self.blocks_shape,
        )

    def size(self, dim):
        """Return the size of a named trailing output dimension, or None."""
        ndim = len(self.grid_size)
        if dim in self.oshape[-ndim:]:
            # Negative offset into grid_size, aligned with the trailing dims.
            i = self.oshape.index(dim) - len(self.oshape)
            return self.grid_size[i]
        return None

__init__

__init__(
    grid_size: tuple[int, ...],
    block_size: tuple[int, ...],
    stride: tuple[int, ...],
    mask: Optional[Tensor] = None,
    batch_shape: Optional[Shape] = None,
    blocks_shape: Optional[Shape] = None,
    array_shape: Optional[Shape] = None,
)
PARAMETER DESCRIPTION
grid_size

Size of the output array spatial dimensions.

TYPE: tuple[int, ...]

block_size

Size of each block.

TYPE: tuple[int, ...]

stride

Stride between consecutive blocks.

TYPE: tuple[int, ...]

mask

Boolean mask selecting a subset of blocks.

TYPE: Tensor DEFAULT: None

batch_shape

Named shape for batch dimensions.

TYPE: Shape DEFAULT: None

blocks_shape

Named shape for the input block dimensions.

TYPE: Shape DEFAULT: None

array_shape

Named shape for the output array dimensions.

TYPE: Shape DEFAULT: None

Source code in src/torchlinops/linops/array_to_blocks.py
def __init__(
    self,
    grid_size: tuple[int, ...],
    block_size: tuple[int, ...],
    stride: tuple[int, ...],
    mask: Optional[Tensor] = None,
    batch_shape: Optional[Shape] = None,
    blocks_shape: Optional[Shape] = None,
    array_shape: Optional[Shape] = None,
):
    """
    Parameters
    ----------
    grid_size : tuple[int, ...]
        Size of the output array spatial dimensions.
    block_size : tuple[int, ...]
        Size of each block.
    stride : tuple[int, ...]
        Stride between consecutive blocks.
    mask : Tensor, optional
        Boolean mask selecting a subset of blocks.
    batch_shape : Shape, optional
        Named shape for batch dimensions.
    blocks_shape : Shape, optional
        Named shape for the input block dimensions.
    array_shape : Shape, optional
        Named shape for the output array dimensions.
    """
    self.grid_size = grid_size
    # Number of spatial dimensions, derived from grid_size.
    self.ndim = len(self.grid_size)
    self.block_size = block_size
    self.stride = stride

    # Unspecified named shapes default to the wildcard ("...",).
    self.batch_shape = default_to(("...",), batch_shape)
    self.blocks_shape = default_to(("...",), blocks_shape)
    self.array_shape = default_to(("...",), array_shape)
    # Full operator shape: batch dims, then blocks (input) -> array (output).
    shape = NS(self.batch_shape) + NS(self.blocks_shape, self.array_shape)
    super().__init__(shape)
    if mask is not None:
        # Frozen parameter so the mask follows the module across devices.
        self.mask = nn.Parameter(mask, requires_grad=False)
    else:
        self.mask = mask