configs/experiment/slot_attention/clevr10.yaml
# @package _global_
defaults:
  - /experiment/slot_attention/_base # (1)!
  - /dataset: clevr # (2)!
  - /experiment/slot_attention/_preprocessing_clevr # (3)!
  - /experiment/slot_attention/_metrics_clevr # (4)!
  - _self_
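# `_self_` is listed last, so values defined in this file override anything
# pulled in by the defaults above (standard Hydra composition order).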
# The following parameters assume training on 8 GPUs, leading to an effective batch size of 64.
trainer:
  devices: 8
  max_steps: 500000
  max_epochs: null
dataset:
  num_workers: 4
  batch_size: 8
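  # Effective batch size = trainer.devices × dataset.batch_size = 8 × 8 = 64.
  # When training on fewer GPUs, scale `batch_size` up to keep the product at 64.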
models:
  conditioning:
    n_slots: 11
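    # CLEVR10 scenes contain at most 10 objects; the eleventh slot is a
    # spare that typically ends up modeling the background.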
1. The shared base configuration for Slot Attention experiments.
2. The CLEVR dataset configuration.
3. CLEVR-specific input preprocessing.
4. CLEVR-specific evaluation metrics.
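To run this experiment, pass the config to the framework's Hydra-based training entrypoint and override values from the command line as needed. A minimal sketch, assuming a hypothetical `train.py` entrypoint (substitute the repository's actual one); the overrides shown keep the effective batch size at 64 on 4 GPUs:

python train.py +experiment=slot_attention/clevr10 trainer.devices=4 dataset.batch_size=16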