# scandeval.enums — documentation-site navigation text and line-number gutter
# removed (scrape residue; not part of the module source).
"""Enums used in the project."""

from enum import Enum, auto


class AutoStrEnum(str, Enum):
    """String-valued enum whose ``auto()`` values are the lowercased member names."""

    @staticmethod
    def _generate_next_value_(name: str, start: int, count: int, last_values: list) -> str:
        """Compute the value assigned to an ``auto()`` member.

        Args:
            name: Name of the member being defined.
            start: Initial value for the enum (unused).
            count: Number of members already defined (unused).
            last_values: Values of the members defined so far (unused).

        Returns:
            The member name in lower case.
        """
        return name.lower()


class Device(AutoStrEnum):
    """Compute device on which the evaluation runs.

    Attributes:
        CPU:
            Plain CPU execution.
        MPS:
            Apple MPS GPU, found in M-series MacBooks.
        CUDA:
            NVIDIA CUDA GPU.
    """

    # Explicit lowercase values, matching what `auto()` would generate on the
    # AutoStrEnum base class.
    CPU = "cpu"
    MPS = "mps"
    CUDA = "cuda"


class InferenceBackend(AutoStrEnum):
    """Backend library used to run model inference.

    Attributes:
        TRANSFORMERS:
            The Hugging Face `transformers` library.
        VLLM:
            The vLLM library.
        LITELLM:
            The LiteLLM library.
        NONE:
            No backend at all (e.g., when a human performs the evaluation).
    """

    # Explicit lowercase values, matching what `auto()` would generate on the
    # AutoStrEnum base class.
    TRANSFORMERS = "transformers"
    VLLM = "vllm"
    LITELLM = "litellm"
    NONE = "none"


class ModelType(AutoStrEnum):
    """Broad category a model belongs to.

    Attributes:
        ENCODER:
            Encoder-only (BERT-style) model.
        GENERATIVE:
            Generative model, whether decoder-only or encoder-decoder (seq2seq).
        HUMAN:
            A human acting as the evaluated "model".
    """

    # Explicit lowercase values, matching what `auto()` would generate on the
    # AutoStrEnum base class.
    ENCODER = "encoder"
    GENERATIVE = "generative"
    HUMAN = "human"


class GenerativeType(AutoStrEnum):
    """Sub-category of a generative model.

    Attributes:
        BASE:
            Base (pretrained-only) generative model.
        INSTRUCTION_TUNED:
            Generative model fine-tuned to follow instructions.
        REASONING:
            Generative model specialised for reasoning.
    """

    # Explicit lowercase values, matching what `auto()` would generate on the
    # AutoStrEnum base class.
    BASE = "base"
    INSTRUCTION_TUNED = "instruction_tuned"
    REASONING = "reasoning"


class DataType(AutoStrEnum):
    """Numeric data type of the model weights.

    Attributes:
        FP32:
            32-bit (single-precision) floating point.
        FP16:
            16-bit (half-precision) floating point.
        BF16:
            16-bit bfloat (brain floating point).
    """

    # Explicit lowercase values, matching what `auto()` would generate on the
    # AutoStrEnum base class.
    FP32 = "fp32"
    FP16 = "fp16"
    BF16 = "bf16"


class BatchingPreference(AutoStrEnum):
    """How a backend prefers its samples to be batched.

    Attributes:
        NO_PREFERENCE:
            Any batching strategy is acceptable.
        SINGLE_SAMPLE:
            Process one sample per batch.
        ALL_AT_ONCE:
            Process every sample in a single batch.
    """

    # Explicit lowercase values, matching what `auto()` would generate on the
    # AutoStrEnum base class.
    NO_PREFERENCE = "no_preference"
    SINGLE_SAMPLE = "single_sample"
    ALL_AT_ONCE = "all_at_once"


class TaskGroup(AutoStrEnum):
    """High-level group that an evaluation task falls into.

    Attributes:
        SEQUENCE_CLASSIFICATION:
            Document-level classification.
        MULTIPLE_CHOICE_CLASSIFICATION:
            Document-level classification with multiple-choice options.
        TOKEN_CLASSIFICATION:
            Classification of individual tokens.
        QUESTION_ANSWERING:
            Extractive question answering.
        TEXT_TO_TEXT:
            Text-to-text generation.
        SPEED:
            Inference-speed benchmark.
    """

    # Explicit lowercase values, matching what `auto()` would generate on the
    # AutoStrEnum base class.
    SEQUENCE_CLASSIFICATION = "sequence_classification"
    MULTIPLE_CHOICE_CLASSIFICATION = "multiple_choice_classification"
    TOKEN_CLASSIFICATION = "token_classification"
    QUESTION_ANSWERING = "question_answering"
    TEXT_TO_TEXT = "text_to_text"
    SPEED = "speed"