
ScanObjectNN

scanobjectnn_PB_T50_RS_h5.zip contains the h5 files for the hard (PB_T50_RS) variant of the ScanObjectNN benchmark.
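If you only want to look at the raw files, a minimal sketch using huggingface_hub works as well. The local output directory scanobjectnn is an arbitrary choice here; the main_split layout and the 'data'/'label' keys come from the loader code below.

import zipfile
import h5py
from huggingface_hub import hf_hub_download

# Fetch the archive from the Hub (repo and filename taken from this card).
zip_path = hf_hub_download(
    repo_id='cminst/ScanObjectNN',
    filename='scanobjectnn_PB_T50_RS_h5.zip',
    repo_type='dataset',
)
with zipfile.ZipFile(zip_path) as zf:
    zf.extractall('scanobjectnn')

# 'data' holds the point clouds, 'label' the integer class indices.
with h5py.File('scanobjectnn/main_split/training_objectdataset_augmentedrot_scale75.h5') as f:
    print(f['data'].shape, f['label'].shape)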

The dataset can be loaded as a PyTorch Geometric InMemoryDataset as follows:

import os
import os.path as osp

import h5py
import torch
from torch_geometric.data import Data, InMemoryDataset, download_url, extract_zip

class ScanObjectNN(InMemoryDataset):
    """ScanObjectNN (hard PB_T50_RS variant) as a PyTorch Geometric dataset."""

    url = 'https://huggingface.co/datasets/cminst/ScanObjectNN/resolve/main/scanobjectnn_PB_T50_RS_h5.zip'

    def __init__(self, root, train=True, transform=None, pre_transform=None, pre_filter=None):
        self.train = train
        super().__init__(root, transform, pre_transform, pre_filter)
        # `InMemoryDataset.load`/`save` require torch_geometric >= 2.4.
        path = self.processed_paths[0] if train else self.processed_paths[1]
        self.load(path)

    @property
    def raw_file_names(self):
        return [
            osp.join('main_split', 'training_objectdataset_augmentedrot_scale75.h5'),
            osp.join('main_split', 'test_objectdataset_augmentedrot_scale75.h5')
        ]

    @property
    def processed_file_names(self):
        return ['training.pt', 'test.pt']

    def download(self):
        # Fetch the zip from the Hugging Face Hub and extract it into raw_dir.
        path = download_url(self.url, self.raw_dir)
        extract_zip(path, self.raw_dir)
        os.unlink(path)

    def process(self):
        self.save(self.process_set('training'), self.processed_paths[0])
        self.save(self.process_set('test'), self.processed_paths[1])

    def process_set(self, split):
        filename = f'{split}_objectdataset_augmentedrot_scale75.h5'
        h5_path = osp.join(self.raw_dir, 'main_split', filename)

        # 'data' holds the point clouds, 'label' the integer class indices.
        with h5py.File(h5_path, 'r') as f:
            data = f['data'][:].astype('float32')
            labels = f['label'][:].astype('int64')

        # Wrap each point cloud and its label in a PyG `Data` object.
        data_list = []
        for i in range(data.shape[0]):
            pos = torch.from_numpy(data[i])
            y = torch.tensor(labels[i]).view(1)
            data_list.append(Data(pos=pos, y=y))

        if self.pre_filter is not None:
            data_list = [d for d in data_list if self.pre_filter(d)]

        if self.pre_transform is not None:
            data_list = [self.pre_transform(d) for d in data_list]

        return data_list

if __name__ == '__main__':
    dataset = ScanObjectNN(root='data/ScanObjectNN', train=True)
    print(f'Dataset: {dataset}')
    print(f'First graph: {dataset[0]}')
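
Once processed, the dataset plugs into PyG's standard mini-batch loader. A minimal sketch follows; the batch size and the FixedPoints subsampling are illustrative choices, not requirements of the dataset.

import torch_geometric.transforms as T
from torch_geometric.loader import DataLoader

# Optionally subsample each cloud to 1024 points on the fly.
dataset = ScanObjectNN(root='data/ScanObjectNN', train=True,
                       transform=T.FixedPoints(1024))
loader = DataLoader(dataset, batch_size=32, shuffle=True)

for batch in loader:
    # batch.pos: [batch_size * num_points, 3]; batch.batch maps each point
    # back to its example; batch.y: [batch_size] class indices.
    print(batch)
    break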