
csvdataset

CSVDataset

Bases: Dataset

A PyTorch Dataset for handling CSV data.
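
A minimal usage sketch (not part of the documented source): it assumes a CSV file ./userData/my_data.csv with a numeric target column named "y"; the path and column name are placeholders. The resulting dataset plugs into a standard DataLoader.

from torch.utils.data import DataLoader
from spotpython.data.csvdataset import CSVDataset

# Rows containing NA values are dropped by default (rmNA=True).
dataset = CSVDataset(filename="my_data.csv", directory="./userData", target_column="y")

# Standard PyTorch batching on top of the dataset.
loader = DataLoader(dataset, batch_size=4, shuffle=True)
for features, targets in loader:
    print(features.shape, targets.shape)
    break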

Source code in spotpython/data/csvdataset.py
import pathlib

import pandas as pd
import torch
from sklearn.preprocessing import LabelEncoder, OrdinalEncoder
from torch.utils.data import Dataset


class CSVDataset(Dataset):
    """
    A PyTorch Dataset for handling CSV data.
    """

    def __init__(
        self,
        filename: str = "data.csv",
        directory: str = None,
        feature_type: torch.dtype = torch.float,
        target_column: str = "y",
        target_type: torch.dtype = torch.float,
        train: bool = True,
        rmNA=True,
        dropId=False,
        oe=OrdinalEncoder(),
        le=LabelEncoder(),
        **desc,
    ) -> None:
        super().__init__()
        self.filename = filename
        self.directory = directory
        self.feature_type = feature_type
        self.target_type = target_type
        self.target_column = target_column
        self.train = train
        self.rmNA = rmNA
        self.dropId = dropId
        self.oe = oe
        self.le = le
        self.data, self.targets = self._load_data()

    @property
    def path(self):
        if self.directory:
            return pathlib.Path(self.directory).joinpath(self.filename)
        return pathlib.Path(__file__).parent.joinpath(self.filename)

    @property
    def _repr_content(self):
        content = super()._repr_content
        content["Path"] = str(self.path)
        return content

    def _load_data(self) -> tuple:
        df = pd.read_csv(self.path, index_col=False)

        # Remove rows with NA if specified
        if self.rmNA:
            df = df.dropna()

        # Drop the id column if specified
        if self.dropId and "id" in df.columns:
            df = df.drop(columns=["id"])

        # Split DataFrame into feature and target DataFrames
        feature_df = df.drop(columns=[self.target_column])

        # Identify non-numerical columns in the feature DataFrame
        non_numerical_columns = feature_df.select_dtypes(exclude=["number"]).columns.tolist()

        # Apply OrdinalEncoder to non-numerical feature columns
        if non_numerical_columns:
            if self.oe is None:
                raise ValueError(
                    f"\n!!! non_numerical_columns in data: {non_numerical_columns}"
                    "\nOrdinalEncoder object oe must be provided for encoding non-numerical columns"
                )
            feature_df[non_numerical_columns] = self.oe.fit_transform(feature_df[non_numerical_columns])

        target_df = df[self.target_column]

        # Check if the target column is non-numerical using dtype
        if not pd.api.types.is_numeric_dtype(target_df):
            if self.le is None:
                raise ValueError(
                    f"\n!!! The target column '{self.target_column}' is non-numerical"
                    "\nLabelEncoder object le must be provided for encoding non-numerical target"
                )
            target_df = self.le.fit_transform(target_df)

        # Convert DataFrames to NumPy arrays and then to PyTorch tensors
        feature_array = feature_df.to_numpy()
        target_array = target_df

        feature_tensor = torch.tensor(feature_array, dtype=self.feature_type)
        target_tensor = torch.tensor(target_array, dtype=self.target_type)

        return feature_tensor, target_tensor

    def __getitem__(self, idx: int) -> tuple:
        """
        Returns the feature and target at the given index.

        Args:
            idx (int): The index.

        Returns:
            tuple: A tuple containing the feature and target at the given index.

        Examples:
            >>> from spotpython.data.csvdataset import CSVDataset
            >>> dataset = CSVDataset(filename='./data/spotpython/data.csv', target_column='prognosis')
            >>> print(dataset.data.shape)
            torch.Size([11, 65])
            >>> print(dataset.targets.shape)
            torch.Size([11])
        """
        feature = self.data[idx]
        target = self.targets[idx]
        return feature, target

    def __len__(self) -> int:
        """
        Returns the length of the dataset.

        Returns:
            int: The length of the dataset.

        Examples:
            >>> from spotpython.data.csvdataset import CSVDataset
            >>> dataset = CSVDataset()
            >>> print(len(dataset))
            60000

        """
        return len(self.data)

    def extra_repr(self) -> str:
        """
        Returns a string representation of the dataset.

        Returns:
            str: A string representation of the dataset.

        Examples:
            >>> from spotpython.data.csvdataset import CSVDataset
            >>> dataset = CSVDataset()
            >>> print(dataset)
            Split: Train

        """
        split = "Train" if self.train else "Test"
        return f"Split: {split}"

    def __ncols__(self) -> int:
        """
        Returns the number of columns in the dataset.

        Returns:
            int: The number of columns in the dataset.

        Examples:
            >>> from spotpython.data.pkldataset import PKLDataset
            >>> import torch
            >>> from torch.utils.data import DataLoader
            >>> dataset = PKLDataset(target_column='prognosis', feature_type=torch.long)
            >>> print(dataset.__ncols__())
            64
        """
        return self.data.size(1)
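
The snippet below is a small illustration (not from the source; the file and column names are made up) of how _load_data treats non-numerical columns: string-valued feature columns are encoded with the supplied OrdinalEncoder, and a string-valued target column with the LabelEncoder.

import pandas as pd
import torch
from spotpython.data.csvdataset import CSVDataset

# Write a tiny CSV with one categorical feature and a categorical target.
df = pd.DataFrame({
    "x1": [1.0, 2.0, 3.0, 4.0],
    "color": ["red", "blue", "red", "green"],  # non-numerical feature -> OrdinalEncoder
    "label": ["cat", "dog", "dog", "cat"],     # non-numerical target  -> LabelEncoder
})
df.to_csv("toy.csv", index=False)

dataset = CSVDataset(filename="toy.csv", directory=".", target_column="label",
                     target_type=torch.long)
print(dataset.data)     # encoded features, float tensor of shape [4, 2]
print(dataset.targets)  # encoded labels, long tensor of shape [4]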

__getitem__(idx)

Returns the feature and target at the given index.

Parameters:

    idx (int): The index. Required.

Returns:

    tuple: A tuple containing the feature and target at the given index.

Examples:

>>> from spotpython.data.csvdataset import CSVDataset
>>> dataset = CSVDataset(filename='./data/spotpython/data.csv', target_column='prognosis')
>>> print(dataset.data.shape)
torch.Size([11, 65])
>>> print(dataset.targets.shape)
torch.Size([11])
Source code in spotpython/data/csvdataset.py
def __getitem__(self, idx: int) -> tuple:
    """
    Returns the feature and target at the given index.

    Args:
        idx (int): The index.

    Returns:
        tuple: A tuple containing the feature and target at the given index.

    Examples:
        >>> from spotpython.data.csvdataset import CSVDataset
        >>> dataset = CSVDataset(filename='./data/spotpython/data.csv', target_column='prognosis')
        >>> print(dataset.data.shape)
        torch.Size([11, 65])
        >>> print(dataset.targets.shape)
        torch.Size([11])
    """
    feature = self.data[idx]
    target = self.targets[idx]
    return feature, target
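
A quick indexing illustration (a sketch; the file name and target column are placeholders): dataset[idx] calls __getitem__ and returns a (feature, target) pair of tensors.

from spotpython.data.csvdataset import CSVDataset

dataset = CSVDataset(filename="toy.csv", directory=".", target_column="label")

feature, target = dataset[0]   # same as dataset.__getitem__(0)
print(feature.shape)           # shape of one feature row
print(target)                  # the corresponding target value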

__len__()

Returns the length of the dataset.

Returns:

    int: The length of the dataset.

Examples:

>>> from spotpython.data.csvdataset import CSVDataset
>>> dataset = CSVDataset()
>>> print(len(dataset))
60000
Source code in spotpython/data/csvdataset.py
def __len__(self) -> int:
    """
    Returns the length of the dataset.

    Returns:
        int: The length of the dataset.

    Examples:
        >>> from spotpython.data.csvdataset import CSVDataset
        >>> dataset = CSVDataset()
        >>> print(len(dataset))
        60000

    """
    return len(self.data)
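
A common follow-on use of __len__ is computing split sizes for torch.utils.data.random_split; this is a sketch with a made-up 80/20 ratio and placeholder file and column names.

from torch.utils.data import random_split
from spotpython.data.csvdataset import CSVDataset

dataset = CSVDataset(filename="toy.csv", directory=".", target_column="label")

n_total = len(dataset)                       # delegates to __len__
n_train = int(0.8 * n_total)
train_set, val_set = random_split(dataset, [n_train, n_total - n_train])
print(len(train_set), len(val_set))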

__ncols__()

Returns the number of columns in the dataset.

Returns:

    int: The number of columns in the dataset.

Examples:

>>> from spotpython.data.pkldataset import PKLDataset
>>> import torch
>>> from torch.utils.data import DataLoader
>>> dataset = PKLDataset(target_column='prognosis', feature_type=torch.long)
>>> print(dataset.__ncols__())
64
Source code in spotpython/data/csvdataset.py
def __ncols__(self) -> int:
    """
    Returns the number of columns in the dataset.

    Returns:
        int: The number of columns in the dataset.

    Examples:
        >>> from spotpython.data.pkldataset import PKLDataset
        >>> import torch
        >>> from torch.utils.data import DataLoader
        >>> dataset = PKLDataset(target_column='prognosis', feature_type=torch.long)
        >>> print(dataset.__ncols__())
        64
    """
    return self.data.size(1)
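
One way to use __ncols__ (a sketch with an arbitrary hidden size and placeholder file and column names) is to derive a model's input dimension directly from the data.

import torch
from spotpython.data.csvdataset import CSVDataset

dataset = CSVDataset(filename="toy.csv", directory=".", target_column="label")

n_features = dataset.__ncols__()   # number of feature columns
model = torch.nn.Sequential(
    torch.nn.Linear(n_features, 16),
    torch.nn.ReLU(),
    torch.nn.Linear(16, 1),
)
print(model)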

extra_repr()

Returns a string representation of the dataset.

Returns:

    str: A string representation of the dataset.

Examples:

>>> from spotpython.data.csvdataset import CSVDataset
>>> dataset = CSVDataset()
>>> print(dataset)
Split: Train
Source code in spotpython/data/csvdataset.py
def extra_repr(self) -> str:
    """
    Returns a string representation of the dataset.

    Returns:
        str: A string representation of the dataset.

    Examples:
        >>> from spotpython.data.csvdataset import CSVDataset
        >>> dataset = CSVDataset()
        >>> print(dataset)
        Split: Train

    """
    split = "Train" if self.train else "Test"
    return f"Split: {split}"