Color

dataphy.visionpack.tforms.color

Classes

ColorJitter(magnitude: Optional[float] = None, brightness: Optional[float] = None, contrast: Optional[float] = None, saturation: Optional[float] = None, hue: Optional[float] = None, order: Optional[List[str]] = None, space: Literal['rgb', 'hsv', 'hsl'] = 'rgb', preserve_robot_color: bool = False, clip_range: Optional[List[float]] = None, gamma: Optional[float] = None, temperature_shift: Optional[float] = None, correlate_views: float = 0.0, **kwargs: Any)

Bases: BaseTransform

Color jittering transform for image augmentation.

This transform applies random color adjustments to images, including brightness, contrast, saturation, and hue changes. It supports multiple color spaces and can preserve robot colors in robotics datasets.
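
A minimal construction sketch (the values below are illustrative, not recommended defaults):

```python
from dataphy.visionpack.tforms.color import ColorJitter

# Jitter each property independently; the ranges here are illustrative.
jitter = ColorJitter(
    brightness=0.2,
    contrast=0.2,
    saturation=0.1,
    hue=0.05,
    space="hsv",                 # perform the adjustments in HSV space
    preserve_robot_color=True,   # keep robot pixels untouched via masks
    clip_range=[0.0, 1.0],       # clip outputs to the valid image range
)
```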

Parameters:

| Name | Type | Description | Default |
| --- | --- | --- | --- |
| `magnitude` | `Optional[float]` | Global magnitude for all color adjustments (overrides individual params) | `None` |
| `brightness` | `Optional[float]` | Brightness adjustment range | `None` |
| `contrast` | `Optional[float]` | Contrast adjustment range | `None` |
| `saturation` | `Optional[float]` | Saturation adjustment range | `None` |
| `hue` | `Optional[float]` | Hue adjustment range | `None` |
| `order` | `Optional[List[str]]` | Order of color operations to apply | `None` |
| `space` | `Literal['rgb', 'hsv', 'hsl']` | Color space to work in (`"rgb"`, `"hsv"`, `"hsl"`) | `'rgb'` |
| `preserve_robot_color` | `bool` | Whether to preserve robot colors using masks | `False` |
| `clip_range` | `Optional[List[float]]` | Range to clip output values | `None` |
| `gamma` | `Optional[float]` | Gamma correction value | `None` |
| `temperature_shift` | `Optional[float]` | Color temperature shift | `None` |
| `correlate_views` | `float` | Probability of using correlated parameters across views | `0.0` |
| `**kwargs` | `Any` | Additional base transform parameters | `{}` |
Source code in src/dataphy/visionpack/tforms/color.py
def __init__(
    self,
    magnitude: Optional[float] = None,
    brightness: Optional[float] = None,
    contrast: Optional[float] = None,
    saturation: Optional[float] = None,
    hue: Optional[float] = None,
    order: Optional[List[str]] = None,
    space: Literal["rgb", "hsv", "hsl"] = "rgb",
    preserve_robot_color: bool = False,
    clip_range: Optional[List[float]] = None,
    gamma: Optional[float] = None,
    temperature_shift: Optional[float] = None,
    correlate_views: float = 0.0,
    **kwargs: Any
):
    """Initialize the ColorJitter transform.

    Args:
        magnitude: Global magnitude for all color adjustments (overrides individual params)
        brightness: Brightness adjustment range
        contrast: Contrast adjustment range
        saturation: Saturation adjustment range
        hue: Hue adjustment range
        order: Order of color operations to apply
        space: Color space to work in ("rgb", "hsv", "hsl")
        preserve_robot_color: Whether to preserve robot colors using masks
        clip_range: Range to clip output values
        gamma: Gamma correction value
        temperature_shift: Color temperature shift
        correlate_views: Probability of using correlated parameters across views
        **kwargs: Additional base transform parameters
    """
    super().__init__(**kwargs)  # type: ignore

    # Use individual parameters if provided, otherwise fall back to magnitude
    if magnitude is not None and all(x is None for x in [brightness, contrast, saturation, hue]):
        self.brightness = magnitude
        self.contrast = magnitude
        self.saturation = magnitude
        self.hue = magnitude
    else:
        self.brightness = brightness or 0.0
        self.contrast = contrast or 0.0
        self.saturation = saturation or 0.0
        self.hue = hue or 0.0

    self.order = order or ["brightness", "contrast", "saturation", "hue"]
    self.space = space
    self.preserve_robot_color = preserve_robot_color
    self.clip_range = clip_range or [0.0, 1.0]
    self.gamma = gamma
    self.temperature_shift = temperature_shift
    self.correlate_views = correlate_views

    # Store correlated parameters for multi-view consistency
    self._correlated_params = {}
    self._last_episode_id = None
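
As a consequence of the fallback above, `magnitude` only takes effect when none of the individual parameters are passed. A small sketch of the resulting attribute values (this mirrors the assignments in `__init__` above and assumes BaseTransform's keyword arguments all have defaults):

```python
from dataphy.visionpack.tforms.color import ColorJitter

# magnitude alone: all four properties share the same range
cj = ColorJitter(magnitude=0.3)
assert (cj.brightness, cj.contrast, cj.saturation, cj.hue) == (0.3, 0.3, 0.3, 0.3)

# any individual parameter disables the magnitude fallback;
# unspecified properties default to 0.0 (no jitter)
cj = ColorJitter(magnitude=0.3, brightness=0.5)
assert (cj.brightness, cj.contrast, cj.saturation, cj.hue) == (0.5, 0.0, 0.0, 0.0)
```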
Attributes

Set by `ColorJitter.__init__`:

- brightness, contrast, saturation, hue — jitter ranges; all four equal `magnitude` when only `magnitude` is given, otherwise each individual value (or 0.0 if unset)
- order — defaults to `['brightness', 'contrast', 'saturation', 'hue']`
- space
- preserve_robot_color
- clip_range — defaults to `[0.0, 1.0]`
- gamma
- temperature_shift
- correlate_views

Set by the base transform (via `**kwargs`):

- p
- apply_to — defaults to `['rgb']`
- sync_views
- update_intrinsics
- mask_protect — defaults to `[]`
- min_visible_mask_pct
- resample
- border_mode
- pad_mode
- pad_value
- seed_policy
Functions
forward(batch: Dict[str, Any]) -> Dict[str, Any]

Base forward method that handles cross-cutting logic.

Source code in src/dataphy/visionpack/tforms/base.py
def forward(self, batch: Dict[str, Any]) -> Dict[str, Any]:
    """Base forward method that handles cross-cutting logic."""
    if not self._should_apply():
        return batch

    # Setup seed policy
    episode_id = batch.get("episode_id")
    self._setup_seed(episode_id)

    # Apply transform
    return self._apply_transform(batch)
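
A sketch of applying the transform to a batch. The batch keys and shapes, and the `p` / `apply_to` keyword arguments forwarded to the base transform, are assumptions based on the attribute defaults listed above rather than a documented contract:

```python
import numpy as np

from dataphy.visionpack.tforms.color import ColorJitter

jitter = ColorJitter(
    magnitude=0.2,
    correlate_views=1.0,   # always reuse the sampled jitter parameters across views
    p=0.5,                 # assumed base-transform kwarg: apply with 50% probability
    apply_to=["rgb"],      # assumed base-transform kwarg: which batch keys to jitter
)

batch = {
    # (views, H, W, C) float images in [0, 1]; this layout is an assumption
    "rgb": np.random.rand(2, 128, 128, 3).astype(np.float32),
    "episode_id": "episode_0042",  # read by forward() to set up the seed policy
}
out = jitter.forward(batch)
```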