dlc2action.data.base_store
Abstract parent classes for the data store objects.
#
# Copyright 2020-present by A. Mathis Group and contributors. All rights reserved.
#
# This project and all its files are licensed under GNU AGPLv3 or later version.
# A copy is included in dlc2action/LICENSE.AGPL.
#
"""Abstract parent classes for the data store objects."""

import inspect
import os.path
from abc import ABC, abstractmethod
from typing import Dict, List, Set, Tuple, Union

import numpy as np
import torch
class Store(ABC):

    @abstractmethod
    def __len__(self) -> int:
        """Get the number of available samples."""
A general parent class for BehaviorStore and InputStore.
Processes input video information and generates ordered arrays of data samples and corresponding unique original coordinates, as well as some meta objects. It is assumed that the input videos are separated into clips (e.g. corresponding to different individuals). Each video and each clip inside a video has a unique id (video_id and clip_id, respectively). The original coordinates object contains information about the video_id, clip_id and start time of the samples in the original input data. A Store has to be fully defined by a tuple of key objects. The data array can be accessed with integer indices. The samples can be stored as a tensor or TensorDict in RAM or as an array of file paths to be loaded at runtime.
@abstractmethod
def remove(self, indices: List) -> None:
Remove the samples corresponding to indices.
Parameters
indices : list
    a list of integer indices to remove
@abstractmethod
def key_objects(self) -> Tuple:
Return a tuple of the key objects necessary to re-create the Store.
Returns
key_objects : tuple
    a tuple of key objects
@abstractmethod
def load_from_key_objects(self, key_objects: Tuple) -> None:
Load the information from a tuple of key objects.
Parameters
key_objects : tuple
    a tuple of key objects
@abstractmethod
def to_ram(self) -> None:
Transfer the data samples to RAM if they were previously stored as file paths.
@abstractmethod
def get_original_coordinates(self) -> np.ndarray:
Return the original coordinates array.
Returns
np.ndarray
    an array that contains the coordinates of the data samples in the original input data (video id, clip id, start frame)
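The exact element layout is defined by each concrete subclass; the base class only guarantees that the array is aligned with the data samples and that every element can be mapped back to a video id, a clip id and a start frame. A hedged usage sketch, assuming `store` is an instance of a concrete subclass:

# Illustrative only: the coordinates array runs parallel to the data samples,
# so results computed for sample i can be traced back to coords[i].
coords = store.get_original_coordinates()
assert len(coords) == len(store)
first_sample_coords = coords[0]  # encodes the video id, clip id and start frame of sample 0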
@abstractmethod
def create_subsample(self, indices: List, ssl_indices: List = None):
Create a new store that contains a subsample of the data.
Parameters
indices : list
    the indices to be included in the subsample
ssl_indices : list, optional
    the indices to be included in the subsample without the annotation data
@classmethod
@abstractmethod
def get_file_ids(cls, *args, **kwargs) -> List:
Get a list of ids.
Process data parameters and return a list of ids of the videos that should
be processed by the __init__ function.
Parameters
*args
    positional arguments
**kwargs
    keyword arguments
Returns
video_ids : list
    a list of video file ids
@classmethod
def get_parameters(cls) -> List:
    return inspect.getfullargspec(cls.__init__).args
Generate a list of parameter names for the __init__ function.
Returns
parameter_names : list
    a list of necessary parameter names
@classmethod
def new(cls):
    return cls()
Create a new instance of the same class.
Returns
new_instance : Store
    a new instance of the same class
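Together, key_objects, load_from_key_objects and new define a save/restore contract: a fully constructed store can be reduced to a tuple of key objects and rebuilt later without reprocessing the raw data. A minimal sketch of that round trip, assuming `store` is an instance of some concrete Store subclass:

# Hypothetical round trip through the key objects.
key_objects = store.key_objects()            # a tuple that fully defines the store
restored = type(store).new()                 # blank instance of the same class
restored.load_from_key_objects(key_objects)  # now equivalent to the original store
assert len(restored) == len(store)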
class InputStore(Store):

    @abstractmethod
    def __getitem__(self, ind: int) -> Dict:
        """Return the sample (a dictionary of features) corresponding to an index."""

    def get_tags(self) -> List:
        """Get a list of all unique meta tag values (`[None]` by default)."""
        return [None]
A class that generates model input data from video information and stores it.
Processes input video information and generates ordered arrays of data samples and corresponding unique original coordinates, as well as some meta objects. It is assumed that the input videos are separated into clips (e.g. corresponding to different individuals). Each video and each clip inside a video has a unique id (video_id and clip_id, respectively). The original coordinates object contains information about the video_id, clip_id and start time of the samples in the original input data. An InputStore has to be fully defined by a tuple of key objects. The data array can be accessed with integer indices. The samples can be stored as a TensorDict in RAM or as an array of file paths to be loaded at runtime. When no arguments are passed, a blank class instance should be created that can later be filled with information from key objects.
@abstractmethod
def __init__(
    self,
    video_order: List = None,
    key_objects: Tuple = None,
    data_path: Union[str, List] = None,
    file_paths: List = None,
    feature_save_path: str = None,
    feature_extraction_pars: Dict = None,
    *args,
    **kwargs
):
    if key_objects is not None:
        self.load_from_key_objects(key_objects)
Initialize a class instance.
Parameters
video_order : list, optional
    a list of video ids that should be processed in the same order (not passed if creating from key objects)
key_objects : tuple, optional
    a tuple of key objects
data_path : str | set, optional
    the path to the folder where the pose and feature files are stored or a set of such paths
    (not passed if creating from key objects or from file_paths)
file_paths : set, optional
    a set of string paths to the pose and feature files
    (not passed if creating from key objects or from data_path)
feature_save_path : str, optional
    the path to the folder where pre-processed files are stored (not passed if creating from key objects)
feature_extraction_pars : dict, optional
    a dictionary of feature extraction parameters (not passed if creating from key objects)
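The constructor is expected to support two modes: building the store from raw data (which runs feature extraction) and rebuilding it from key objects. A hedged sketch of both modes; `MyInputStore` and all paths and parameter values are hypothetical placeholders:

# Build from raw data (hypothetical subclass and arguments).
video_ids = MyInputStore.get_file_ids(data_path="/path/to/data")
store = MyInputStore(
    video_order=video_ids,
    data_path="/path/to/data",
    feature_save_path="/path/to/features",
    feature_extraction_pars={},
)

# Rebuild later from the key objects, without touching the raw files.
restored = MyInputStore(key_objects=store.key_objects())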
@abstractmethod
def get_video_id(self, coords: Tuple) -> str:
Get the video id from an element of original coordinates.
Parameters
coords : tuple
    an element of the original coordinates array
Returns
video_id : str
    the id of the video that the coordinates point to
@abstractmethod
def get_clip_id(self, coords: Tuple) -> str:
Get the clip id from an element of original coordinates.
Parameters
coords : tuple
    an element of the original coordinates array
Returns
clip_id : str
    the id of the clip that the coordinates point to
@abstractmethod
def get_clip_length(self, video_id: str, clip_id: str) -> int:
Get the clip length from the video id and the clip id.
Parameters
video_id : str
    the video id
clip_id : str
    the clip id
Returns
clip_length : int
    the length of the clip
@abstractmethod
def get_clip_start_end(self, coords: Tuple) -> Tuple[int, int]:
Get the clip start and end frames from an element of original coordinates.
Parameters
coords : tuple
    an element of the original coordinates array
Returns
start : int
    the start frame of the clip that the coordinates point to
end : int
    the end frame of the clip that the coordinates point to
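Combined with get_video_id and get_clip_id, this lets downstream code map every sample back to its place in the raw recordings. A hedged usage sketch, assuming `store` is a concrete InputStore instance:

# Trace each sample back to its original video segment.
for coords in store.get_original_coordinates():
    video_id = store.get_video_id(coords)
    clip_id = store.get_clip_id(coords)
    start, end = store.get_clip_start_end(coords)
    print(f"{video_id} / {clip_id}: frames {start}-{end}")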
@abstractmethod
def get_clip_start(self, video_id: str, clip_id: str) -> int:
Get the clip start frame from the video id and the clip id.
Parameters
video_id : str
    the video id
clip_id : str
    the clip id
Returns
clip_start : int
    the start frame of the clip
@abstractmethod
def get_visibility(
    self, video_id: str, clip_id: str, start: int, end: int, score: float
) -> float:
Get the fraction of frames in the given interval that have a visibility score better than a hard_threshold.
For example, in the case of keypoint data the visibility score can be the number of identified keypoints.
Parameters
video_id : str
    the video id of the frames
clip_id : str
    the clip id of the frames
start : int
    the start frame
end : int
    the end frame
score : float
    the visibility score hard_threshold
Returns
frac_visible : float
    the fraction of frames with visibility above the hard_threshold
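As the docstring suggests, for keypoint data the visibility score can be derived from how many body parts were reliably detected. A minimal sketch of one possible implementation for a hypothetical subclass that keeps per-frame, per-bodypart likelihoods in a self.data[video_id][clip_id] array (an assumed attribute, not part of the base class):

def get_visibility(self, video_id, clip_id, start, end, score):
    # Assumed layout: array of shape (#frames, #bodyparts) with detection likelihoods.
    likelihoods = self.data[video_id][clip_id][start:end]
    # A frame counts as visible when its mean body-part likelihood beats the threshold.
    visible = (likelihoods.mean(axis=-1) > score).sum()
    return visible / max(end - start, 1)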
@abstractmethod
def get_annotation_objects(self) -> Dict:
Get a dictionary of objects necessary to create a BehaviorStore.
Returns
annotation_objects : dict
    a dictionary of objects to be passed to the BehaviorStore constructor, where the keys are the names of the objects
@abstractmethod
def get_folder(self, video_id: str) -> str:
Get the input folder that the file with this video id was read from.
Parameters
video_id : str
    the video id
Returns
folder : str
    the path to the directory that contains the input file associated with the video id
def get_clip_length_from_coords(self, coords: Tuple) -> int:
    v_id = self.get_video_id(coords)
    clip_id = self.get_clip_id(coords)
    return self.get_clip_length(v_id, clip_id)
Get the length of a clip from an element of the original coordinates array.
Parameters
coords : tuple
    an element of the original coordinates array
Returns
clip_length : int
    the length of the clip
def get_folder_order(self) -> List:
    return [os.path.basename(self.get_folder(x)) for x in self.get_video_id_order()]
Get a list of folders corresponding to the data samples.
Returns
folder_order : list
    a list of string folder basenames corresponding to the data samples (e.g. 'folder2' if the corresponding file was read from '/path/to/folder1/folder2')
def get_video_id_order(self) -> List:
    return [self.get_video_id(x) for x in self.get_original_coordinates()]
Get a list of video ids corresponding to the data samples.
Returns
video_id_order : list
    a list of string names of the video ids corresponding to the data samples
def get_tag(self, idx: int) -> Union[int, None]:
    return None
Return a tag object corresponding to an index.
Tags can carry meta information (like annotator id) and are accepted by models that require
that information and by metrics (some metrics have options for averaging over the tags).
When a tag is None, it is not passed to the model.
Parameters
idx : int
    the index
Returns
tag : int
    the tag index
def get_indices(self, tag: int) -> List:
    return list(range(len(self)))
Get a list of indices of samples that have a specific meta tag.
Parameters
tag : int
    the meta tag for the subsample (None for the whole dataset)
Returns
indices : list
    a list of indices that meet the criteria
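By default the tag machinery is disabled: get_tag returns None, get_indices ignores the tag and get_tags (see the class overview above) returns [None]. A hedged sketch of how a subclass might override these methods to expose annotator ids, assuming a hypothetical self.annotator_ids list aligned with the samples:

def get_tag(self, idx):
    return self.annotator_ids[idx]  # assumed per-sample annotator index

def get_tags(self):
    return sorted(set(self.annotator_ids))

def get_indices(self, tag):
    if tag is None:
        return list(range(len(self)))
    return [i for i, t in enumerate(self.annotator_ids) if t == tag]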
class BehaviorStore(Store):

    required_objects = []
    """A list of string names of the objects required from the input store."""

    @abstractmethod
    def __getitem__(self, ind: int) -> torch.Tensor:
        """Return the annotation tensor of the sample corresponding to an index."""
A class that generates annotation data from video information and stores it.
Processes input video information and generates ordered arrays of annotation samples and corresponding unique original coordinates, as well as some meta objects. It is assumed that the input videos are separated into clips (e.g. corresponding to different individuals). Each video and each clip inside a video has a unique id (video_id and clip_id, respectively). The original coordinates object contains information about the video_id, clip_id and start time of the samples in the original input data. A BehaviorStore has to be fully defined by a tuple of key objects. The annotation array can be accessed with integer indices. The samples can be stored as a torch.Tensor in RAM or as an array of file paths to be loaded at runtime. When no arguments are passed, a blank class instance should be created that can later be filled with information from key objects.
@abstractmethod
def __init__(
    self,
    video_order: List = None,
    key_objects: Tuple = None,
    annotation_path: Union[str, Set] = None,
    *args,
    **kwargs
):
    if key_objects is not None:
        self.load_from_key_objects(key_objects)
Initialize the class instance.
Parameters
video_order : list, optional
    a list of video ids that should be processed in the same order (not passed if creating from key objects)
key_objects : tuple, optional
    a tuple of key objects
annotation_path : str | set, optional
    the path or the set of paths to the folder where the annotation files are stored (not passed if creating from key objects)
@abstractmethod
def get_len(self, return_unlabeled: bool) -> int:
Get the length of the subsample of labeled/unlabeled data.
If return_unlabeled is True, the length of the unlabeled subsample is returned; if False, the length of the labeled subsample; and if None, the length of the whole dataset.
Parameters
return_unlabeled : bool
    the identifier for the subsample
Returns
length : int
    the length of the subsample
@abstractmethod
def count_classes(
    self, frac: bool = False, zeros: bool = False, bouts: bool = False
) -> Dict:
Get a dictionary with class-wise frame counts.
Parameters
frac : bool, default False
    if True, a fraction of the total frame count is returned
zeros : bool, default False
    if True, the number of known negative samples is counted (only if the annotation is multi-label)
bouts : bool, default False
    if True, segment (bout) counts are returned instead of frame counts
Returns
count_dictionary : dict
    a dictionary with class indices as keys and frame counts as values
@abstractmethod
def behaviors_dict(self) -> Dict:
Get a dictionary of class names.
Returns
behavior_dictionary : dict
    a dictionary with class indices as keys and class names as values
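Since count_classes and behaviors_dict both key their results by class index, they compose naturally. A hedged usage sketch, assuming behavior_store is a concrete BehaviorStore instance:

# Report the fraction of frames per behavior (illustrative only).
fractions = behavior_store.count_classes(frac=True)
names = behavior_store.behaviors_dict()
for class_index, frac in fractions.items():
    print(f"{names.get(class_index, class_index)}: {frac:.1%} of frames")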
@abstractmethod
def annotation_class(self) -> str:
Get the type of annotation ('exclusive_classification', 'nonexclusive_classification', more coming soon).
Returns
annotation_class : str
    the type of annotation
@abstractmethod
def size(self) -> int:
Get the total number of frames in the data.
Returns
size : int
    the total number of frames
@abstractmethod
def filtered_indices(self) -> List:
Return the indices of the samples that should be removed.
The choice of indices can be based on any kind of filtering defined by the data parameters in the __init__ function.
Returns
indices_to_remove : list
    a list of integer indices that should be removed
@abstractmethod
def set_pseudo_labels(self, labels: torch.Tensor) -> None:
Assign pseudo labels to the unlabeled data.
Parameters
labels : torch.Tensor
    a tensor of pseudo-labels for the unlabeled data
class PoseInputStore(InputStore):
A subclass of InputStore for pose estimation data.
Contains methods used by pose estimation feature extractors.
All methods receive a data dictionary as input. This dictionary is the same as what is passed to the
feature extractor and the only limitations for the structure are that it has to relate to one video id
and have clip ids as keys. Read the documentation at dlc2action.data to find out more about videos
and clips.
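The base class does not prescribe what is stored under each clip id; that is left to the subclass and its feature extractor. A purely illustrative (hypothetical) layout that satisfies the constraints above, for a single video with clip ids as keys:

import numpy as np

# Hypothetical data dictionary for one video with two individuals (clips).
data_dict = {
    "individual0": {"nose": np.zeros((100, 2)), "tail_base": np.zeros((100, 2))},
    "individual1": {"nose": np.zeros((100, 2)), "tail_base": np.zeros((100, 2))},
}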
def get_likelihood(
    self, data_dict: Dict, clip_id: str, bodypart: str
) -> Union[np.ndarray, None]:
    return None
Get the likelihood values.
Parameters
data_dict : dict
    the data dictionary
clip_id : str
    the clip id
bodypart : str
    the name of the body part
Returns
likelihoods : np.ndarray | None
    None if the dataset doesn't have likelihoods or an array of shape (#timestamps)
@abstractmethod
def get_coords(self, data_dict: Dict, clip_id: str, bodypart: str) -> np.ndarray:
Get the coordinates array of a specific body part in a specific clip.
Parameters
data_dict : dict
    the data dictionary
clip_id : str
    the clip id
bodypart : str
    the name of the body part
Returns
coords : np.ndarray
    the coordinates array of shape (#timesteps, #coordinates)
@abstractmethod
def get_bodyparts(self) -> List:
Get a list of bodypart names.
Returns
bodyparts : list
    a list of string or integer body part names
@abstractmethod
def get_n_frames(self, data_dict: Dict, clip_id: str) -> int:
Get the length of the clip.
Parameters
data_dict : dict
    the data dictionary
clip_id : str
    the clip id
Returns
n_frames : int
    the length of the clip
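Taken together, these methods give a feature extractor everything it needs to iterate over pose data without knowing the subclass's storage format. A hedged sketch of such a loop, assuming store is a concrete PoseInputStore instance and data_dict is the dictionary for one video:

for clip_id in data_dict:
    n_frames = store.get_n_frames(data_dict, clip_id)
    for bodypart in store.get_bodyparts():
        coords = store.get_coords(data_dict, clip_id, bodypart)           # (#timesteps, #coordinates)
        likelihoods = store.get_likelihood(data_dict, clip_id, bodypart)  # may be None
        # ... compute per-frame features from coords (and likelihoods, if available) ...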