configs/evaluation/projects/bridging/_preprocessing_coco_thingsandstuff.yaml
# @package _global_
# Preprocessing for evaluating semantic segmentation on the 172 thing and
# stuff classes of COCO-Stuff-164k.
#
# The image is resized to 224 pixels on the shorter side (bicubic), clamped to
# [0, 1], center cropped to 224x224, and normalized with ImageNet statistics.
# The segmentation and ignore masks are resized to 320 pixels on the shorter
# side (nearest-exact) and center cropped to 320x320. A commented torchvision
# sketch of the image pipeline is given at the bottom of this file.
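#
# 03a runs once per sample and converts the raw COCO-Stuff-164k annotation
# into separate segmentation and ignore masks; 03b then applies per-field
# tensor transforms to the image and the two masks.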
dataset:
  eval_transforms:
    03a_preprocessing:
      _target_: ocl.transforms.Map
      transform:
        _target_: torchvision.transforms.Compose
        transforms:
          - _target_: ocl.preprocessing.ConvertCocoStuff164kMasks
            output_key: segmentation_mask
            stuffthings_key: stuffthings_mask
            ignore_key: ignore_mask
          - _target_: ocl.preprocessing.DropEntries
            keys:
              - stuffthings_mask
      fields:
        - stuffthings_mask
      batch_transform: false
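    # Each key under `transforms` below names the sample field the composed
    # transform is applied to.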
    03b_preprocessing:
      _target_: ocl.transforms.SimpleTransform
      transforms:
        image:
          _target_: torchvision.transforms.Compose
          transforms:
            - _target_: torchvision.transforms.ToTensor
            - _target_: torchvision.transforms.Resize
              size: 224
              interpolation: ${torchvision_interpolation_mode:BICUBIC}
            # Bicubic interpolation can overshoot; clamp back to [0, 1]
            # before normalization.
            - "${lambda_fn:'lambda image: image.clamp(0.0, 1.0)'}"
            - _target_: torchvision.transforms.CenterCrop
              size: 224
            - _target_: torchvision.transforms.Normalize
              mean: [0.485, 0.456, 0.406]
              std: [0.229, 0.224, 0.225]
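        # Masks are resized with nearest-exact interpolation so integer class
        # ids are never blended across label boundaries.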
        segmentation_mask:
          _target_: torchvision.transforms.Compose
          transforms:
            - _target_: ocl.preprocessing.DenseMaskToTensor
            - _target_: ocl.preprocessing.ResizeNearestExact
              size: 320
            - _target_: torchvision.transforms.CenterCrop
              size: 320
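        # The ignore mask gets the same geometry so it stays pixel-aligned
        # with the segmentation mask.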
        ignore_mask:
          _target_: torchvision.transforms.Compose
          transforms:
            - _target_: ocl.preprocessing.DenseMaskToTensor
            - _target_: ocl.preprocessing.ResizeNearestExact
              size: 320
            - _target_: torchvision.transforms.CenterCrop
              size: 320
      batch_transform: false
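
# For reference, the image pipeline above corresponds roughly to the following
# plain torchvision code (a sketch, not part of the config; `img` is assumed
# to be a PIL image):
#
#   import torchvision.transforms as T
#
#   pipeline = T.Compose([
#       T.ToTensor(),
#       T.Resize(224, interpolation=T.InterpolationMode.BICUBIC),
#       T.Lambda(lambda image: image.clamp(0.0, 1.0)),  # undo bicubic overshoot
#       T.CenterCrop(224),
#       T.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
#   ])
#   out = pipeline(img)  # float tensor of shape (3, 224, 224)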