Audio MNIST
Based on AudioMNIST. Note: the dataset viewer is unavailable for this dataset because the Parquet file has 48,011 columns (13 metadata columns plus 47,998 audio sample columns, one per sample of the longest recording), which exceeds the viewer's limit of 1,000 columns.
Generation of the Parquet File
Given audioMNISTFolderPath, the path to the data folder of the source dataset:
import os
import json
import numpy as np
import pandas as pd
import scipy as sp
import scipy.io.wavfile  # Makes `sp.io.wavfile` accessible

# Load all wave files in the AudioMNIST dataset
# Each file name is parsed as <digit>_<speaker>_<index>.wav
dfData = pd.DataFrame(columns = ['Digit', 'Speaker', 'Index', 'SampleRate', 'NumSamples', 'Accent', 'Age', 'Gender', 'NativeSpeaker', 'Continent', 'Country', 'City', 'Room'])

# The speaker metadata is stored as JSON in `audioMNIST_meta.txt`
with open(os.path.join(audioMNISTFolderPath, 'audioMNIST_meta.txt'), 'r') as f:
    dMetadata = json.load(f)
lWaveFiles = []
lFolders = os.listdir(audioMNISTFolderPath)
lFolders = [fld for fld in lFolders if os.path.isdir(os.path.join(audioMNISTFolderPath, fld))]
lFolders.sort()

fileIdx = -1
for fld in lFolders:
    lFiles = os.listdir(os.path.join(audioMNISTFolderPath, fld))
    lFiles = [f for f in lFiles if f.endswith('.wav')]
    lFiles.sort()
    print(f'Folder {fld}: {len(lFiles)} files')
    for f in lFiles:
        fileIdx += 1
        # Parse File Name: <digit>_<speaker>_<index>.wav
        digitIdx, speakerIdx, recIdx = f[:-4].split('_')
        sampleRate, vAudioData = sp.io.wavfile.read(os.path.join(audioMNISTFolderPath, fld, f))
        lWaveFiles.append(vAudioData)
        dfData.loc[fileIdx, 'Digit'] = int(digitIdx)
        dfData.loc[fileIdx, 'Speaker'] = int(speakerIdx)
        dfData.loc[fileIdx, 'Index'] = int(recIdx)
        dfData.loc[fileIdx, 'SampleRate'] = int(sampleRate)
        dfData.loc[fileIdx, 'NumSamples'] = int(len(vAudioData))
        # Parse Metadata (keys in the JSON file are zero padded speaker IDs)
        metaIdx = f'{int(speakerIdx):02d}'
        dfData.loc[fileIdx, 'Accent'] = dMetadata[metaIdx]['accent']
        dfData.loc[fileIdx, 'Age'] = int(dMetadata[metaIdx]['age'])
        dfData.loc[fileIdx, 'Gender'] = dMetadata[metaIdx]['gender']
        dfData.loc[fileIdx, 'NativeSpeaker'] = dMetadata[metaIdx]['native speaker']
        # Parse Continent, Country, City from the origin string
        locationStr = dMetadata[metaIdx]['origin']
        # Remove spaces, split by ','
        locationStr = locationStr.replace(' ', '')
        contStr, countryStr, cityStr = locationStr.split(',')
        dfData.loc[fileIdx, 'Continent'] = contStr
        dfData.loc[fileIdx, 'Country'] = countryStr
        dfData.loc[fileIdx, 'City'] = cityStr
        dfData.loc[fileIdx, 'Room'] = dMetadata[metaIdx]['recordingroom']
# Generate DataFrame of the Audio Data
maxSignals = dfData.shape[0]
maxNumSamples = dfData['NumSamples'].max()

mA = np.zeros((maxSignals, maxNumSamples), dtype = np.int16)
for ii, vA in enumerate(lWaveFiles):
    mA[ii, :len(vA)] = vA

dfAudio = pd.DataFrame(data = mA, columns = [f'{sampleIdx:d}' for sampleIdx in range(maxNumSamples)])

# Generate the AudioMNIST Data Frame
dfAudioMnist = pd.concat([dfData, dfAudio], axis = 1)
# Set the Type per column
dfAudioMnist['Digit'] = dfAudioMnist['Digit'].astype(np.int8)
dfAudioMnist['Speaker'] = dfAudioMnist['Speaker'].astype(np.int8)
dfAudioMnist['Index'] = dfAudioMnist['Index'].astype(np.int32)
dfAudioMnist['SampleRate'] = dfAudioMnist['SampleRate'].astype(np.int32)
dfAudioMnist['NumSamples'] = dfAudioMnist['NumSamples'].astype(np.int32)
dfAudioMnist['Age'] = dfAudioMnist['Age'].astype(np.int32)
dfAudioMnist['Gender'] = dfAudioMnist['Gender'].map({'male': 'Male', 'female': 'Female'})
dfAudioMnist['NativeSpeaker'] = dfAudioMnist['NativeSpeaker'].map({'yes': True, 'no': False})
dfAudioMnist['NativeSpeaker'] = dfAudioMnist['NativeSpeaker'].astype(bool)
# Export to Parquet
dfAudioMnist.to_parquet(os.path.join(audioMNISTFolderPath, 'AudioMNIST.parquet'), index = False)
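
Reading the Data Back
Since recordings shorter than the longest one are zero padded, the NumSamples column is needed to trim each waveform. A minimal sketch of reading the exported file back with pandas and reconstructing a single recording (it assumes the same audioMNISTFolderPath and the AudioMNIST.parquet file produced above; the output file name sample.wav is arbitrary):
import os
import numpy as np
import pandas as pd
import scipy.io.wavfile

# Read the Parquet file generated above
dfAudioMnist = pd.read_parquet(os.path.join(audioMNISTFolderPath, 'AudioMNIST.parquet'))

# The metadata columns come first; the remaining columns ('0', '1', ...) hold the audio samples
lMetaCols  = ['Digit', 'Speaker', 'Index', 'SampleRate', 'NumSamples', 'Accent', 'Age', 'Gender', 'NativeSpeaker', 'Continent', 'Country', 'City', 'Room']
lAudioCols = [col for col in dfAudioMnist.columns if col not in lMetaCols]

# Reconstruct a single recording (row 0), trimming the zero padding using NumSamples
dsRow      = dfAudioMnist.iloc[0]
numSamples = int(dsRow['NumSamples'])
vAudioData = dsRow[lAudioCols].to_numpy(dtype = np.int16)[:numSamples]

# Optionally write it back to a WAV file for listening
scipy.io.wavfile.write('sample.wav', int(dsRow['SampleRate']), vAudioData)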