# @package _global_
# Configuration to exactly reproduce unsupervised object recognition of the original slot attention
# paper.
defaults:
  - /experiment/slot_attention/_base # (1)!
  - /dataset: clevr6 # (2)!
  - /experiment/slot_attention/_preprocessing_clevr # (3)!
  - /experiment/slot_attention/_metrics_clevr # (4)!
  - _self_

# The following parameters assume training on 8 GPUs, leading to an effective batch size of 64.
trainer:
  devices: 8
  max_steps: 500000
  # Train by step count only: the epoch limit is explicitly disabled (write `null`
  # rather than a bare empty value, which is an ambiguous implicit null).
  max_epochs: null

dataset:
  num_workers: 4
  # Per-device batch size; with 8 devices this yields the effective batch size of 64 noted above.
  batch_size: 8

models:
  conditioning:
    # NOTE(review): 7 slots presumably covers CLEVR6's up-to-6 objects plus one
    # background/extra slot, matching the original paper's setup — confirm.
    n_slots: 7