configs/evaluation/projects/bridging/metrics_coco_ccrop_sa.yaml
# @package _global_
# Evaluate masks from a slot attention model on COCO with center-crop preprocessing.
defaults:
  - /evaluation_config
  - /evaluation/projects/bridging/_base_metrics
  - /evaluation/projects/bridging/_preprocessing_coco
  - /evaluation/projects/bridging/_metrics_discovery_masks
  - /evaluation/projects/bridging/_metrics_segmentation
  - /dataset: coco_nocrowd
  - _self_
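
# Keys below override values composed via the defaults list above, since
# `_self_` is listed last in the Hydra composition order.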
eval_batch_size: 16
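
# Note: `patch_mode: false` presumably tells the mask-resizing module to treat
# predicted masks as dense spatial maps rather than per-patch (ViT token) masks,
# which matches a slot attention model operating on spatial feature maps.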
modules:
  masks_resized:
    patch_mode: false
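
# Evaluation-time preprocessing: images are resized and center-cropped to 128x128
# for the model input, while ground-truth masks are resized (nearest neighbor) and
# center-cropped to 320x320 for metric computation.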
plugins:
  03b_preprocessing:
    evaluation_transforms:
      image:
        _target_: torchvision.transforms.Compose
        transforms:
          - _target_: torchvision.transforms.ToTensor
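          # `${torchvision_interpolation_mode:BILINEAR}` is resolved by a custom
          # OmegaConf resolver (assumed to be registered by the framework) to
          # torchvision's InterpolationMode.BILINEAR.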
          - _target_: torchvision.transforms.Resize
            size: 128
            interpolation: ${torchvision_interpolation_mode:BILINEAR}
          - _target_: torchvision.transforms.CenterCrop
            size: 128
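          # Normalizing with mean/std of 0.5 maps pixel values from [0, 1] to [-1, 1].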
          - _target_: torchvision.transforms.Normalize
            mean: [0.5, 0.5, 0.5]
            std: [0.5, 0.5, 0.5]
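      # Ground-truth masks use nearest-neighbor resizing so that discrete object
      # labels are preserved without blending across boundaries.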
      instance_mask:
        _target_: torchvision.transforms.Compose
        transforms:
          - _target_: ocl.preprocessing.DenseMaskToTensor
          - _target_: ocl.preprocessing.ResizeNearestExact
            size: 320
          - _target_: torchvision.transforms.CenterCrop
            size: 320
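      # Segmentation masks are preprocessed identically to instance masks.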
      segmentation_mask:
        _target_: torchvision.transforms.Compose
        transforms:
          - _target_: ocl.preprocessing.DenseMaskToTensor
          - _target_: ocl.preprocessing.ResizeNearestExact
            size: 320
          - _target_: torchvision.transforms.CenterCrop
            size: 320