Skip to content

Data

nanodrr.data

Subject

Subject(
    imagedata: Float[Tensor, "1 1 D H W"],
    labeldata: Float[Tensor, "1 1 D H W"],
    voxel_to_world: Float[Tensor, "4 4"],
    world_to_voxel: Float[Tensor, "4 4"],
    voxel_to_grid: Float[Tensor, "4 4"],
    isocenter: Float[Tensor, "3"],
    max_label: int | None = None,
)

Wrapper for a CT volume and (optional) labelmap that is compatible with torch.nn.functional.grid_sample.

Fuses all spatial transforms required for sampling (world → voxel → grid) so that rendering only needs to perform a single matmul.

Source code in src/nanodrr/data/io.py
def __init__(
    self,
    imagedata: Float[torch.Tensor, "1 1 D H W"],
    labeldata: Float[torch.Tensor, "1 1 D H W"],
    voxel_to_world: Float[torch.Tensor, "4 4"],
    world_to_voxel: Float[torch.Tensor, "4 4"],
    voxel_to_grid: Float[torch.Tensor, "4 4"],
    isocenter: Float[torch.Tensor, "3"],
    max_label: int | None = None,
) -> None:
    """Register the volume, labelmap, and spatial transforms as buffers.

    The fused world → grid transform (``voxel_to_grid @ world_to_voxel``)
    is precomputed here so that rendering only needs a single matmul.
    """
    super().__init__()

    # Buffers travel with the module across .to(device)/.to(dtype) calls
    # but are excluded from gradient-based optimization.
    buffers = {
        "image": imagedata,
        "label": labeldata,
        "world_to_grid": voxel_to_grid @ world_to_voxel,  # fused transform
        "isocenter": isocenter,
        "voxel_to_world": voxel_to_world,
        "world_to_voxel": world_to_voxel,
        "voxel_to_grid": voxel_to_grid,
    }
    for buffer_name, tensor in buffers.items():
        self.register_buffer(buffer_name, tensor)

    # Class count: an explicit override wins; otherwise infer it from the
    # largest label value present in the labelmap.
    if max_label is None:
        self.n_classes = int(self.label.max().item()) + 1
    else:
        self.n_classes = int(max_label + 1)

from_filepath classmethod

from_filepath(
    imagepath: str | Path,
    labelpath: str | Path | None = None,
    convert_to_mu: bool = True,
    mu_water: float = 0.0192,
    mu_bone: float = 0.0573,
    hu_bone: float = 1000.0,
    max_label: int | None = None,
) -> Subject

Load a subject from NIfTI (or any TorchIO-supported) file paths.

PARAMETER DESCRIPTION
imagepath

Path to the CT volume.

TYPE: str | Path

labelpath

Optional path to a label map.

TYPE: str | Path | None DEFAULT: None

convert_to_mu

Convert Hounsfield units to linear attenuation.

TYPE: bool DEFAULT: True

mu_water

Linear attenuation coefficient of water (mm⁻¹).

TYPE: float DEFAULT: 0.0192

mu_bone

Linear attenuation coefficient of cortical bone (mm⁻¹).

TYPE: float DEFAULT: 0.0573

hu_bone

HU value corresponding to pure cortical bone.

TYPE: float DEFAULT: 1000.0

max_label

Override the maximum label index. If provided, n_classes is set to max_label + 1 instead of being inferred from the data.

TYPE: int | None DEFAULT: None

Source code in src/nanodrr/data/io.py
@classmethod
def from_filepath(
    cls,
    imagepath: str | Path,
    labelpath: str | Path | None = None,
    convert_to_mu: bool = True,
    mu_water: float = 0.0192,
    mu_bone: float = 0.0573,
    hu_bone: float = 1000.0,
    max_label: int | None = None,
) -> "Subject":
    """Load a subject from NIfTI (or any TorchIO-supported) file paths.

    Args:
        imagepath: Path to the CT volume.
        labelpath: Optional path to a label map.
        convert_to_mu: Convert Hounsfield units to linear attenuation.
        mu_water: Linear attenuation coefficient of water (mm⁻¹).
        mu_bone: Linear attenuation coefficient of cortical bone (mm⁻¹).
        hu_bone: HU value corresponding to pure cortical bone.
        max_label: Override the maximum label index. If provided, `n_classes`
            is set to `max_label + 1` instead of being inferred from the data.

    Returns:
        A `Subject` wrapping the loaded volume (and label map, if given).
    """
    image = ScalarImage(imagepath)
    label = LabelMap(labelpath) if labelpath is not None else None
    return cls.from_images(image, label, convert_to_mu, mu_water, mu_bone, hu_bone, max_label)

from_images classmethod

from_images(
    image: ScalarImage,
    label: LabelMap | None = None,
    convert_to_mu: bool = True,
    mu_water: float = 0.0192,
    mu_bone: float = 0.0573,
    hu_bone: float = 1000.0,
    max_label: int | None = None,
) -> Subject

Construct a subject from TorchIO image objects.

PARAMETER DESCRIPTION
image

CT volume as a ScalarImage.

TYPE: ScalarImage

label

Optional segmentation as a LabelMap.

TYPE: LabelMap | None DEFAULT: None

convert_to_mu

Convert Hounsfield units to linear attenuation.

TYPE: bool DEFAULT: True

mu_water

Linear attenuation coefficient of water (mm⁻¹).

TYPE: float DEFAULT: 0.0192

mu_bone

Linear attenuation coefficient of cortical bone (mm⁻¹).

TYPE: float DEFAULT: 0.0573

hu_bone

HU value corresponding to pure cortical bone.

TYPE: float DEFAULT: 1000.0

max_label

Override the maximum label index. If provided, n_classes is set to max_label + 1 instead of being inferred from the data.

TYPE: int | None DEFAULT: None

Source code in src/nanodrr/data/io.py
@classmethod
def from_images(
    cls,
    image: ScalarImage,
    label: LabelMap | None = None,
    convert_to_mu: bool = True,
    mu_water: float = 0.0192,
    mu_bone: float = 0.0573,
    hu_bone: float = 1000.0,
    max_label: int | None = None,
) -> "Subject":
    """Construct a subject from TorchIO image objects.

    Args:
        image: CT volume as a `ScalarImage`.
        label: Optional segmentation as a `LabelMap`.
        convert_to_mu: Convert Hounsfield units to linear attenuation.
        mu_water: Linear attenuation coefficient of water (mm⁻¹).
        mu_bone: Linear attenuation coefficient of cortical bone (mm⁻¹).
        hu_bone: HU value corresponding to pure cortical bone.
        max_label: Override the maximum label index. If provided, `n_classes`
            is set to `max_label + 1` instead of being inferred from the data.

    Returns:
        A `Subject` holding the (optionally HU→mu converted) volume, the
        label map (zeros if absent), and the precomputed spatial transforms.
    """
    # Affine: invert in float64 for numerical accuracy, then downcast
    voxel_to_world_f64 = torch.from_numpy(image.affine).to(torch.float64)
    voxel_to_world = voxel_to_world_f64.to(torch.float32)
    world_to_voxel = voxel_to_world_f64.inverse().to(torch.float32)

    # Image data
    imagedata = cls._to_bcdhw(image.data).to(torch.float32)
    if convert_to_mu:
        imagedata = hu_to_mu(imagedata, mu_water, mu_bone, hu_bone)

    # Label data: fall back to an all-zero (single background class) map
    if label is not None:
        labeldata = cls._to_bcdhw(label.data).to(torch.float32)
    else:
        labeldata = torch.zeros_like(imagedata)

    isocenter = torch.tensor(image.get_center(), dtype=torch.float32)
    voxel_to_grid = cls._make_voxel_to_grid(imagedata.shape)

    return cls(
        imagedata,
        labeldata,
        voxel_to_world,
        world_to_voxel,
        voxel_to_grid,
        isocenter,
        max_label,
    )

nanodrr.data.preprocess

hu_to_mu

hu_to_mu(
    data: Float[Tensor, "1 1 D H W"],
    mu_water: float = 0.0192,
    mu_bone: float = 0.0573,
    hu_bone: float = 1000.0,
) -> Float[Tensor, "1 1 D H W"]

Convert Hounsfield units to linear attenuation coefficients.

Uses bilinear scaling with air-water model for HU ≤ 0 and water-bone model for HU > 0:

\[ \mu = \begin{cases} \mu_{\mathrm{water}} \cdot \left(\frac{\mathrm{HU}}{1000} + 1\right) & \text{if } \mathrm{HU} \leq 0 \\ \mu_{\mathrm{water}} + (\mu_{\mathrm{bone}} - \mu_{\mathrm{water}}) \cdot \frac{\mathrm{HU}}{\mathrm{HU}_{\mathrm{bone}}} & \text{if } \mathrm{HU} > 0 \end{cases} \]
PARAMETER DESCRIPTION
data

CT volume in Hounsfield Units with shape (1, 1, D, H, W).

TYPE: Float[Tensor, '1 1 D H W']

mu_water

Linear attenuation coefficient of water [1/mm] at target energy. Default 0.0192 corresponds to ~70 keV (typical CT effective energy).

TYPE: float DEFAULT: 0.0192

mu_bone

Linear attenuation coefficient of cortical bone [1/mm] at target energy. Default 0.0573 corresponds to ~70 keV.

TYPE: float DEFAULT: 0.0573

hu_bone

HU value corresponding to pure cortical bone. Default 1000. Typical range is 1000-2000 depending on bone type and scanner.

TYPE: float DEFAULT: 1000.0

RETURNS DESCRIPTION
Float[Tensor, '1 1 D H W']

Linear attenuation coefficients [1/mm] with same shape as input.

References

NIST XCOM database for mass attenuation coefficients. Water density: 1.0 g/cm³, cortical bone density: 1.92 g/cm³.

Source code in src/nanodrr/data/preprocess.py
def hu_to_mu(
    data: Float[torch.Tensor, "1 1 D H W"],
    mu_water: float = 0.0192,
    mu_bone: float = 0.0573,
    hu_bone: float = 1000.0,
) -> Float[torch.Tensor, "1 1 D H W"]:
    r"""Convert Hounsfield units to linear attenuation coefficients.

    Uses bilinear scaling with air-water model for HU ≤ 0 and
    water-bone model for HU > 0:

    $$
    \mu = \begin{cases}
        \mu_{\mathrm{water}} \cdot \left(\frac{\mathrm{HU}}{1000} + 1\right) & \text{if } \mathrm{HU} \leq 0 \\
        \mu_{\mathrm{water}} + (\mu_{\mathrm{bone}} - \mu_{\mathrm{water}}) \cdot \frac{\mathrm{HU}}{\mathrm{HU}_{\mathrm{bone}}} & \text{if } \mathrm{HU} > 0
    \end{cases}
    $$

    Args:
        data: CT volume in Hounsfield Units with shape (1, 1, D, H, W).
        mu_water: Linear attenuation coefficient of water [1/mm] at target
            energy. Default 0.0192 corresponds to ~70 keV (typical CT
            effective energy).
        mu_bone: Linear attenuation coefficient of cortical bone [1/mm] at
            target energy. Default 0.0573 corresponds to ~70 keV.
        hu_bone: HU value corresponding to pure cortical bone. Default 1000.
            Typical range is 1000-2000 depending on bone type and scanner.

    Returns:
        Linear attenuation coefficients [1/mm] with same shape as input.

    References:
        NIST XCOM database for mass attenuation coefficients.
        Water density: 1.0 g/cm³, cortical bone density: 1.92 g/cm³.
    """
    hu_clamped = data.clamp(min=-1000.0)

    mu_low = mu_water * (1.0 + hu_clamped / 1000.0)
    mu_high = mu_water + (hu_clamped / hu_bone) * (mu_bone - mu_water)
    mu = torch.where(hu_clamped <= 0, mu_low, mu_high)

    return mu.clamp(min=0.0)

nanodrr.data.demo

download_deepfluoro

download_deepfluoro(subject: int = 1) -> tuple[str, str]

Download a subject from the DeepFluoro dataset.

Source code in src/nanodrr/data/demo.py
def download_deepfluoro(subject: int = 1) -> tuple[str, str]:
    """Download a subject from the DeepFluoro dataset.

    Files are cached under ``CACHE_DIR`` and only fetched when missing.
    Returns the local (volume path, mask path) pair.
    """
    subject_id = f"subject{subject:02d}"
    base_url = f"https://huggingface.co/datasets/eigenvivek/xvr-data/resolve/main/deepfluoro/{subject_id}"
    subject_dir = os.path.join(CACHE_DIR, "deepfluoro", subject_id)

    local_paths: list[str] = []
    for filename in ("volume.nii.gz", "mask.nii.gz"):
        local_path = os.path.join(subject_dir, filename)
        local_paths.append(local_path)
        if os.path.exists(local_path):
            continue  # already cached; skip the download
        os.makedirs(os.path.dirname(local_path), exist_ok=True)
        torch.hub.download_url_to_file(f"{base_url}/{filename}", local_path)

    imagepath, labelpath = local_paths
    return imagepath, labelpath