configs/experiment/slot_attention/_preprocessing_coco.yaml

# @package _global_
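# Preprocessing for Slot Attention experiments on COCO: eval transforms build
# dense instance and segmentation masks and resize everything to 128x128;
# train transforms only augment and normalize the image.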
dataset:
  eval_transforms:
    03a_preprocessing:
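      # Map over each sample's listed fields: convert COCO instance annotations
      # to dense per-object masks, derive a segmentation mask from them, and add
      # empty masks for images without annotations.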
      _target_: ocl.transforms.Map
      transform:
        _target_: torchvision.transforms.Compose
        transforms:
          - _target_: ocl.preprocessing.InstanceMasksToDenseMasks
          - _target_: ocl.preprocessing.AddSegmentationMaskFromInstanceMask
          - _target_: ocl.preprocessing.AddEmptyMasks
            mask_keys:
              - instance_mask
              - segmentation_mask
          # Drop instance_category again, as some images do not contain it
          - _target_: ocl.preprocessing.DropEntries
            keys:
              - instance_category

      fields:
        - image
        - instance_mask
        - instance_category
      batch_transform: false
    03b_preprocessing:
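      # Deterministic eval preprocessing: bilinear resize of the shorter side
      # to 128, center crop, and normalization of the image to roughly [-1, 1].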
      _target_: ocl.transforms.SimpleTransform
      transforms:
        image:
          _target_: torchvision.transforms.Compose
          transforms:
            - _target_: torchvision.transforms.ToTensor
            - _target_: torchvision.transforms.Resize
              size: 128
              interpolation: ${torchvision_interpolation_mode:BILINEAR}
            - _target_: torchvision.transforms.CenterCrop
              size: 128
            - _target_: torchvision.transforms.Normalize
              mean: [0.5, 0.5, 0.5]
              std: [0.5, 0.5, 0.5]
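        # Masks are resized with exact nearest-neighbor interpolation and
        # center-cropped like the image, so mask values stay discrete and
        # pixel-aligned with it.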
        instance_mask:
          _target_: torchvision.transforms.Compose
          transforms:
            - _target_: ocl.preprocessing.DenseMaskToTensor
            - _target_: ocl.preprocessing.ResizeNearestExact
              size: 128
            - _target_: torchvision.transforms.CenterCrop
              size: 128
        segmentation_mask:
          _target_: torchvision.transforms.Compose
          transforms:
            - _target_: ocl.preprocessing.DenseMaskToTensor
            - _target_: ocl.preprocessing.ResizeNearestExact
              size: 128
            - _target_: torchvision.transforms.CenterCrop
              size: 128
      batch_transform: false
  train_transforms:
    03b_preprocessing:
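      # Train-time preprocessing: same resize, but with a random crop and a
      # horizontal flip for augmentation. Only the image field is transformed
      # here (masks are only consumed at eval time).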
      _target_: ocl.transforms.SimpleTransform
      transforms:
        image:
          _target_: torchvision.transforms.Compose
          transforms:
            - _target_: torchvision.transforms.ToTensor
            - _target_: torchvision.transforms.Resize
              size: 128
              interpolation: ${torchvision_interpolation_mode:BILINEAR}
            - _target_: torchvision.transforms.RandomCrop
              size: 128
            - _target_: torchvision.transforms.RandomHorizontalFlip
            - _target_: torchvision.transforms.Normalize
              mean: [0.5, 0.5, 0.5]
              std: [0.5, 0.5, 0.5]
      batch_transform: false
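
For reference, a minimal sketch (not code from this repo) of what the train-time image pipeline above amounts to in plain torchvision. It assumes the custom ${torchvision_interpolation_mode:BILINEAR} resolver maps the string to torchvision's InterpolationMode.BILINEAR:

import torchvision.transforms as T

# Hedged sketch of the train `image` pipeline; assumes the config's
# ${torchvision_interpolation_mode:BILINEAR} resolves to
# InterpolationMode.BILINEAR.
train_image_transform = T.Compose(
    [
        T.ToTensor(),  # PIL image -> float tensor in [0, 1], CHW layout
        T.Resize(128, interpolation=T.InterpolationMode.BILINEAR),  # shorter side -> 128
        T.RandomCrop(128),  # random 128x128 crop (augmentation)
        T.RandomHorizontalFlip(),  # flip with probability 0.5
        T.Normalize(mean=[0.5, 0.5, 0.5], std=[0.5, 0.5, 0.5]),  # -> roughly [-1, 1]
    ]
)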