dlc2action.options

All option dictionaries are stored here: name-to-class registries for input and annotation stores, feature extractors, SSL constructors, transformers, losses, metrics, optimizers and models, along with default parameter lists, hyperparameter search spaces, and color maps.
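The dictionaries below map configuration strings to the classes that implement them. As a minimal illustrative sketch (not part of the library itself), resolving a name into its class is a plain dictionary lookup:

    # Illustrative only: configuration strings are resolved to classes by
    # looking them up in the registries defined in this module.
    from dlc2action.options import metrics, models, optimizers

    model_cls = models["ms_tcn3"]       # dlc2action.model.ms_tcn.MS_TCN3
    optimizer_cls = optimizers["Adam"]  # torch.optim.Adam
    metric_cls = metrics["f1"]          # dlc2action.metric.metrics.F1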

#
# Copyright 2020-present by A. Mathis Group and contributors. All rights reserved.
#
# This project and all its files are licensed under GNU AGPLv3 or later version.
# A copy is included in dlc2action/LICENSE.AGPL.
#
"""
All option dictionaries are stored here: name-to-class registries for input and annotation stores, feature extractors, SSL constructors, transformers, losses, metrics, optimizers and models, along with default parameter lists, hyperparameter search spaces, and color maps.
"""

from dlc2action.data.annotation_store import (
    BorisStore,
    CalMS21Store,
    CSVActionSegStore,
    DLC2ActionStore,
    EmptyBehaviorStore,
    SIMBAStore,
)
from dlc2action.data.input_store import (
    CalMS21InputStore,
    DLCTrackletStore,
    DLCTrackStore,
    LoadedFeaturesInputStore,
    Numpy3DInputStore,
    SIMBAInputStore,
    ESKTrackStore,
)
from dlc2action.feature_extraction import HeatmapExtractor, KinematicExtractor
from dlc2action.loss import MS_TCN_Loss
from dlc2action.metric.metrics import (
    F1,
    PR_AUC,
    Accuracy,
    Count,
    EditDistance,
    Fbeta,
    mAP,
    Precision,
    Recall,
    SegmentalF1,
    SegmentalFbeta,
    SegmentalPrecision,
    SegmentalRecall,
    SemiSegmentalF1,
    SemiSegmentalPR_AUC,
    SemiSegmentalPrecision,
    SemiSegmentalRecall,
)


# from dlc2action.model.c3d import C3D_A, C3D_A_MS
from dlc2action.model.asformer import ASFormer
from dlc2action.model.c2f_tcn import C2F_TCN
from dlc2action.model.c2f_transformer import C2F_Transformer
from dlc2action.model.edtcn import EDTCN
from dlc2action.model.mlp import MLP
from dlc2action.model.ms_tcn import MS_TCN3  # ,MS_TCN_P
from dlc2action.model.transformer import Transformer
from dlc2action.model.motionbert import MotionBERT

from dlc2action.ssl.contrastive import (
    ContrastiveMaskedSSL,
    ContrastiveRegressionSSL,
    ContrastiveSSL,
    PairwiseMaskedSSL,
    PairwiseSSL,
)
from dlc2action.ssl.masked import (
    MaskedFeaturesSSL_FC,
    MaskedFramesSSL_FC,
    MaskedKinematicSSL_FC,
    MaskedFeaturesSSL_TCN,
    MaskedFramesSSL_TCN,
    MaskedKinematicSSL_TCN,
)
from dlc2action.ssl.segment_order import OrderSSL, ReverseSSL
from dlc2action.ssl.tcc import TCCSSL
from dlc2action.transformer.heatmap import HeatmapTransformer
from dlc2action.transformer.kinematic import KinematicTransformer
from torch.optim import SGD, Adam

input_stores = {
    "dlc_tracklet": DLCTrackletStore,
    "dlc_track": DLCTrackStore,
    "calms21": CalMS21InputStore,
    "np_3d": Numpy3DInputStore,
    "features": LoadedFeaturesInputStore,
    "simba": SIMBAInputStore,
    "esk_track": ESKTrackStore,
}

annotation_stores = {
    "dlc": DLC2ActionStore,
    "boris": BorisStore,
    "none": EmptyBehaviorStore,
    "calms21": CalMS21Store,
    "csv": CSVActionSegStore,
    "simba": SIMBAStore,
}

feature_extractors = {"kinematic": KinematicExtractor, "heatmap": HeatmapExtractor}

ssl_constructors = {
    "masked_features": MaskedFeaturesSSL_FC,
    "masked_joints": MaskedKinematicSSL_FC,
    "masked_frames": MaskedFramesSSL_FC,
    "contrastive": ContrastiveSSL,
    "pairwise": PairwiseSSL,
    "contrastive_masked": ContrastiveMaskedSSL,
    "pairwise_masked": PairwiseMaskedSSL,
    "reverse": ReverseSSL,
    "order": OrderSSL,
    "contrastive_regression": ContrastiveRegressionSSL,
    "tcc": TCCSSL,
}

ssl_constructors_tcn = {
    "masked_features": MaskedFeaturesSSL_TCN,
    "masked_joints": MaskedKinematicSSL_TCN,
    "masked_frames": MaskedFramesSSL_TCN,
    "contrastive": ContrastiveSSL,
    "pairwise": PairwiseSSL,
    "contrastive_masked": ContrastiveMaskedSSL,
    "pairwise_masked": PairwiseMaskedSSL,
    "reverse": ReverseSSL,
    "order": OrderSSL,
    "contrastive_regression": ContrastiveRegressionSSL,
    "tcc": TCCSSL,
}

transformers = {"kinematic": KinematicTransformer, "heatmap": HeatmapTransformer}

losses = {
    "ms_tcn": MS_TCN_Loss,
}
losses_multistage = [
    "ms_tcn",
]  # losses that expect predictions of shape (#stages, #batch, #classes, #frames)

metrics = {
    "accuracy": Accuracy,
    "precision": Precision,
    "f1": F1,
    "recall": Recall,
    "count": Count,
    "segmental_precision": SegmentalPrecision,
    "segmental_recall": SegmentalRecall,
    "segmental_f1": SegmentalF1,
    "edit_distance": EditDistance,
    "f_beta": Fbeta,
    "segmental_f_beta": SegmentalFbeta,
    "semisegmental_precision": SemiSegmentalPrecision,
    "semisegmental_recall": SemiSegmentalRecall,
    "semisegmental_f1": SemiSegmentalF1,
    "pr-auc": PR_AUC,
    "semisegmental_pr-auc": SemiSegmentalPR_AUC,
    "mAP": mAP,
}
metrics_minimize = [
    "edit_distance"
]  # metrics that decrease when prediction quality increases
metrics_no_direction = ["count"]  # metrics that do not indicate prediction quality

optimizers = {"Adam": Adam, "SGD": SGD}

models = {
    "ms_tcn3": MS_TCN3,
    "asformer": ASFormer,
    "mlp": MLP,
    "c2f_tcn": C2F_TCN,
    "edtcn": EDTCN,
    "transformer": Transformer,
    "c2f_transformer": C2F_Transformer,
    "motionbert": MotionBERT,
}


blanks = [
    "dataset_inverse_weights",
    "dataset_proportional_weights",
    "dataset_classes",
    "dataset_features",
    "dataset_len_segment",
    "dataset_bodyparts",
    "dataset_boundary_weight",
    "model_features",
]

extractor_to_transformer = {
    "kinematic": "kinematic",
    "heatmap": "heatmap",
}  # keys are feature extractor names, values are transformer names

partition_methods = {
    "random": [
        "random",
        "random:test-from-name",
        "random:test-from-name:{name}",
        "random:equalize:segments",
        "random:equalize:videos",
    ],
    "fixed": [
        "val-from-name:{val_name}:test-from-name:{test_name}",
        "time",
        "time:start-from:{frac}",
        "time:start-from:{frac}:strict",
        "time:strict",
        "file",
        "folders",
    ],
}

basic_parameters = {
    "data": [
        "data_suffix",
        "feature_suffix",
        "annotation_suffix",
        "canvas_shape",
        "ignored_bodyparts",
        "likelihood_threshold",
        "behaviors",
        "filter_annotated",
        "filter_background",
        "visibility_min_score",
        "visibility_min_frac",
    ],
    "augmentations": {
        "heatmap": ["augmentations", "rotation_degree_limits"],
        "kinematic": [
            "augmentations",
            "rotation_limits",
            "mirror_dim",
            "noise_std",
            "zoom_limits",
            "masking_probability",
        ],
    },
    "features": {
        "heatmap": ["keys", "channel_policy", "heatmap_width", "sigma"],
        "kinematic": [
            "keys",
            "averaging_window",
            "distance_pairs",
            "angle_pairs",
            "zone_vertices",
            "zone_bools",
            "zone_distances",
            "area_vertices",
        ],
    },
    "model": {
        "asformer": [
            "num_decoders",
            "num_layers",
            "r1",
            "r2",
            "num_f_maps",
            "channel_masking_rate",
        ],
        "c2f_tcn": ["num_f_maps", "feature_dim"],
        "c2f_transformer": ["num_f_maps", "feature_dim", "heads"],
        "edtcn": ["kernel_size", "mid_channels"],
        "mlp": ["f_maps_list", "dropout_rates"],
        "ms_tcn3": [
            "num_layers_PG",
            "num_layers_R",
            "num_R",
            "num_f_maps",
            "shared_weights",
        ],
        "transformer": ["num_f_maps", "N", "heads", "num_pool"],
    },
    "general": [
        "model_name",
        "metric_functions",
        "ignored_clips",
        "len_segment",
        "overlap",
        "interactive",
    ],
    "losses": {
        "ms_tcn": ["focal", "gamma", "alpha"],
    },
    "metrics": {
        "f1": ["average", "ignored_classes", "threshold_value"],
        "precision": ["average", "ignored_classes", "threshold_value"],
        "recall": ["average", "ignored_classes", "threshold_value"],
        "f_beta": ["average", "ignored_classes", "threshold_value", "beta"],
        "count": ["classes"],
        "segmental_precision": [
            "average",
            "ignored_classes",
            "threshold_value",
            "iou_threshold",
        ],
        "segmental_recall": [
            "average",
            "ignored_classes",
            "threshold_value",
            "iou_threshold",
        ],
        "segmental_f1": [
            "average",
            "ignored_classes",
            "threshold_value",
            "iou_threshold",
        ],
        "segmental_f_beta": [
            "average",
            "ignored_classes",
            "threshold_value",
            "iou_threshold",
        ],
        "pr-auc": ["average", "ignored_classes", "threshold_step"],
        "mAP": ["average", "ignored_classes", "iou_threshold", "threshold_value"],
        "semisegmental_precision": ["average", "ignored_classes", "iou_threshold"],
        "semisegmental_recall": ["average", "ignored_classes", "iou_threshold"],
        "semisegmental_f1": ["average", "ignored_classes", "iou_threshold"],
    },
    "training": [
        "lr",
        "device",
        "num_epochs",
        "to_ram",
        "batch_size",
        "normalize",
        "temporal_subsampling_size",
        "parallel",
        "val_frac",
        "test_frac",
        "partition_method",
    ],
}

model_hyperparameters = {
    "asformer": {
        "losses/ms_tcn/alpha": ("float_log", 1e-5, 1e-2),
        "losses/ms_tcn/focal": ("categorical", [True, False]),
        "training/temporal_subsampling_size": ("float", 0.75, 1),
        "model/num_decoders": ("int", 1, 4),
        "model/num_f_maps": ("categorical", [32, 64, 128]),
        "model/num_layers": ("int", 5, 10),
        "model/channel_masking_rate": ("float", 0.2, 0.4),
        "general/len_segment": ("categorical", [256, 512, 1024, 2048]),
        "losses/ms_tcn/weights": ("categorical", [None, "dataset_inverse_weights"]),
    },
    "c2f_tcn": {
        "losses/ms_tcn/alpha": ("float_log", 1e-5, 1e-2),
        "losses/ms_tcn/focal": ("categorical", [True, False]),
        "training/temporal_subsampling_size": ("float", 0.75, 1),
        "model/num_f_maps": ("int_log", 32, 128),
        "general/len_segment": ("categorical", [512, 1024, 2048]),
        "losses/ms_tcn/weights": ("categorical", [None, "dataset_inverse_weights"]),
    },
    "c2f_transformer": {
        "losses/ms_tcn/alpha": ("float_log", 1e-5, 1e-2),
        "losses/ms_tcn/focal": ("categorical", [True, False]),
        "training/temporal_subsampling_size": ("float", 0.75, 1),
        "model/num_f_maps": ("categorical", [32, 64, 128]),
        "model/heads": ("categorical", [1, 2, 4, 8]),
        "general/len_segment": ("categorical", [512, 1024, 2048]),
        "losses/ms_tcn/weights": ("categorical", [None, "dataset_inverse_weights"]),
    },
    "edtcn": {
        "losses/ms_tcn/alpha": ("float_log", 1e-5, 1e-2),
        "losses/ms_tcn/focal": ("categorical", [True, False]),
        "training/temporal_subsampling_size": ("float", 0.75, 1),
        "general/len_segment": ("categorical", [256, 512, 1024, 2048]),
        "losses/ms_tcn/weights": ("categorical", [None, "dataset_inverse_weights"]),
    },
    "ms_tcn3": {
        "losses/ms_tcn/alpha": ("float_log", 1e-5, 1e-2),
        "losses/ms_tcn/focal": ("categorical", [True, False]),
        "training/temporal_subsampling_size": ("float", 0.75, 1),
        "model/num_layers_PG": ("int", 5, 20),
        "model/shared_weights": ("categorical", [True, False]),
        "model/num_layers_R": ("int", 5, 10),
        "model/num_f_maps": ("int_log", 32, 128),
        "general/len_segment": ("categorical", [256, 512, 1024, 2048]),
        "losses/ms_tcn/weights": ("categorical", [None, "dataset_inverse_weights"]),
    },
    "transformer": {
        "losses/ms_tcn/alpha": ("float_log", 1e-5, 1e-2),
        "losses/ms_tcn/focal": ("categorical", [True, False]),
        "training/temporal_subsampling_size": ("float", 0.75, 1),
        "model/N": ("int", 5, 12),
        "model/heads": ("categorical", [1, 2, 4, 8]),
        "model/num_pool": ("int", 0, 4),
        "model/add_batchnorm": ("categorical", [True, False]),
        "general/len_segment": ("categorical", [256, 512, 1024, 2048]),
        "losses/ms_tcn/weights": ("categorical", [None, "dataset_inverse_weights"]),
    },
    "mlp": {
        "losses/ms_tcn/alpha": ("float_log", 1e-5, 1e-2),
        "losses/ms_tcn/focal": ("categorical", [True, False]),
        "training/temporal_subsampling_size": ("float", 0.75, 1),
        "model/dropout_rates": ("float", 0.3, 0.6),
        "losses/ms_tcn/weights": ("categorical", [None, "dataset_inverse_weights"]),
    },
}

dlc2action_colormaps = {
    "default": [
        "#BBBBBF",
        "#99d096",
        "#ea678e",
        "#f9ba5b",
        "#639cd2",
        "#F1F285",
        "#B16CB9",
        "#ABE3CE",
        "#DD98A5",
        "#C44F53",
        "#BCC144",
        "#D6AF85",
        "#BBBBBF",
    ]
}
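The model_hyperparameters entries above describe per-model search spaces as ("float"/"float_log"/"int"/"int_log", low, high) or ("categorical", choices) tuples, keyed by "section/subsection/parameter" paths. The code that consumes them is not shown on this page; the sketch below is a hypothetical illustration, assuming an Optuna-style trial object and a made-up suggest_from_space helper, of how such tuples can be expanded into concrete values for one trial.

    # Hypothetical helper (not part of dlc2action): expand one search-space
    # dictionary into concrete values using an Optuna-style trial object.
    import optuna

    from dlc2action.options import model_hyperparameters

    def suggest_from_space(trial: optuna.Trial, space: dict) -> dict:
        values = {}
        for name, spec in space.items():
            kind = spec[0]
            if kind == "categorical":
                values[name] = trial.suggest_categorical(name, spec[1])
            elif kind in ("float", "float_log"):
                values[name] = trial.suggest_float(
                    name, spec[1], spec[2], log=(kind == "float_log")
                )
            elif kind in ("int", "int_log"):
                values[name] = trial.suggest_int(
                    name, spec[1], spec[2], log=(kind == "int_log")
                )
            else:
                raise ValueError(f"unknown parameter type: {kind!r}")
        return values

    # Inside an objective function one could then call, for example:
    # params = suggest_from_space(trial, model_hyperparameters["ms_tcn3"])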