Sidon: Fast and Robust Open-Source Multilingual Speech Restoration for Large-scale Dataset Cleansing
Paper
• 2509.17052 • Published
• 3
Error code: StreamingRowsError
Exception: RuntimeError
Message: Failed to open input buffer: Invalid data found when processing input
Traceback: Traceback (most recent call last):
File "/src/services/worker/src/worker/utils.py", line 99, in get_rows_or_raise
return get_rows(
^^^^^^^^^
File "/src/libs/libcommon/src/libcommon/utils.py", line 272, in decorator
return func(*args, **kwargs)
^^^^^^^^^^^^^^^^^^^^^
File "/src/services/worker/src/worker/utils.py", line 77, in get_rows
rows_plus_one = list(itertools.islice(ds, rows_max_number + 1))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 2543, in __iter__
for key, example in ex_iterable:
^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/datasets/iterable_dataset.py", line 2061, in __iter__
batch = formatter.format_batch(pa_table)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/datasets/formatting/formatting.py", line 472, in format_batch
batch = self.python_features_decoder.decode_batch(batch)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/datasets/formatting/formatting.py", line 234, in decode_batch
return self.features.decode_batch(batch, token_per_repo_id=self.token_per_repo_id) if self.features else batch
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/datasets/features/features.py", line 2161, in decode_batch
decode_nested_example(self[column_name], value, token_per_repo_id=token_per_repo_id)
File "/usr/local/lib/python3.12/site-packages/datasets/features/features.py", line 1419, in decode_nested_example
return schema.decode_example(obj, token_per_repo_id=token_per_repo_id) if obj is not None else None
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/datasets/features/audio.py", line 211, in decode_example
audio = AudioDecoder(
^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/torchcodec/decoders/_audio_decoder.py", line 64, in __init__
self._decoder = create_decoder(source=source, seek_mode="approximate")
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/torchcodec/decoders/_decoder_utils.py", line 45, in create_decoder
return core.create_from_file_like(source, seek_mode)
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
File "/usr/local/lib/python3.12/site-packages/torchcodec/_core/ops.py", line 151, in create_from_file_like
return _convert_to_tensor(_pybind_ops.create_from_file_like(file_like, seek_mode))
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
RuntimeError: Failed to open input buffer: Invalid data found when processing input

Need help to make the dataset viewer work? Make sure to review how to configure the dataset viewer, and open a discussion for direct support.
This is a preprocessed dataset made by applying Sidon: Fast and Robust Open-Source Multilingual Speech Restoration for Large-scale Dataset Cleansing (https://huggingface.co/spaces/sarulab-speech/sidon_demo_beta) to the LJSpeech-1.1 dataset.
Format: Following Stylish-TTS
Files:
Reproduction code:
# --- Environment setup (Google Colab / IPython cell magics) ---
%cd /content
# aria2 downloads the LJSpeech archive with 16 parallel connections.
!sudo apt install aria2 -y
# Remove any partial archive left over from a previous run before re-downloading.
!rm LJSpeech-1.1.tar.bz2
!aria2c -x 16 https://data.keithito.com/data/speech/LJSpeech-1.1.tar.bz2
!tar -xf LJSpeech-1.1.tar.bz2
# misaki provides the English G2P (grapheme-to-phoneme) front-end used below.
!uv pip install "misaki[en] @ git+https://github.com/Fannovel16/misaki" --system
# --- Model setup: download and load the Sidon restoration models ---
import numpy as np
import torch
import torchaudio
import transformers
from huggingface_hub import hf_hub_download
import soundfile as sf
from misaki.en import G2P
# English grapheme-to-phoneme converter used to build the training lists.
g2p = G2P()
# TorchScript checkpoints of the Sidon v0.1 feature extractor and decoder
# (CUDA builds, per the filenames).
fe_path = hf_hub_download("sarulab-speech/sidon-v0.1", filename="feature_extractor_cuda.pt")
decoder_path = hf_hub_download("sarulab-speech/sidon-v0.1", filename="decoder_cuda.pt")
# w2v-BERT 2.0 feature extractor: turns raw 16 kHz audio into model input features.
preprocessor = transformers.SeamlessM4TFeatureExtractor.from_pretrained(
    "facebook/w2v-bert-2.0"
)
fe = torch.jit.load(fe_path,map_location='cuda').to('cuda')
decoder = torch.jit.load(decoder_path,map_location='cuda').to('cuda')
def denoise_speech(audio):
    """Restore/denoise one utterance with the Sidon feature extractor + decoder.

    Parameters
    ----------
    audio : tuple[int, np.ndarray] | None
        ``(sample_rate, waveform)``. The waveform is assumed channels-last —
        ``(frames,)`` or ``(frames, channels)`` as returned by
        ``soundfile.read`` — TODO confirm for other callers.

    Returns
    -------
    tuple[int, np.ndarray] | None
        ``(48_000, restored_waveform)`` truncated to the duration of the
        input, or ``None`` when *audio* is ``None``.

    Relies on the module-level globals ``preprocessor``, ``fe`` and
    ``decoder`` loaded above, and runs inference on CUDA.
    """
    if audio is None:
        return None
    sample_rate, waveform = audio
    # Peak-normalize to 0.9. Guard against an all-zero (silent) clip, which
    # would otherwise divide by zero and fill the waveform with NaNs.
    peak = np.abs(waveform).max()
    if peak > 0:
        waveform = 0.9 * (waveform / peak)
    # Number of output samples matching the input duration at 48 kHz.
    target_n_samples = int(48_000 / sample_rate * waveform.shape[0])
    # Ensure waveform is a tensor
    if not isinstance(waveform, torch.Tensor):
        waveform = torch.tensor(waveform, dtype=torch.float32)
    # If stereo, convert to mono (averages over dim=1, i.e. assumes a
    # channels-last (frames, channels) layout — TODO confirm).
    if waveform.ndim > 1 and waveform.shape[0] > 1:
        waveform = torch.mean(waveform, dim=1)
    # Add a batch dimension
    waveform = waveform.view(1, -1)
    # Remove low-frequency rumble below 50 Hz, then resample to the model's 16 kHz.
    wav = torchaudio.functional.highpass_biquad(waveform, sample_rate, 50)
    wav_16k = torchaudio.functional.resample(wav, sample_rate, 16_000)
    restoreds = []
    feature_cache = None
    # Append 1.5 s of silence so the tail of the signal is fully decoded;
    # the extra output is dropped by the [:target_n_samples] slice below.
    wav_16k = torch.nn.functional.pad(wav_16k, (0, 24000))
    # Stream in 96-second chunks, carrying the last feature frame between
    # chunks so the decoder sees continuous context at chunk boundaries.
    for chunk in wav_16k.view(-1).split(16000 * 96):
        inputs = preprocessor(
            torch.nn.functional.pad(chunk, (160, 160)), sampling_rate=16_000, return_tensors="pt"
        ).to('cpu')
        with torch.inference_mode():
            feature = fe(inputs["input_features"].to("cuda"))["last_hidden_state"]
            if feature_cache is not None:
                feature = torch.cat([feature_cache, feature], dim=1)
            # Trim the decoder's trailing 960 samples — presumably the output
            # of the single cached context frame; verify against the model.
            restoreds.append(decoder(feature.transpose(1, 2)).view(-1)[:-960])
            feature_cache = feature[:, -1:]
    restored_wav = torch.cat(restoreds, dim=0)
    return 48_000, restored_wav.cpu().numpy()[:target_n_samples]
# --- Output layout: fresh 48 kHz and 24 kHz dataset directories ---
# Clear any previous run's output, then recreate the Stylish-TTS layout:
# a wavs/ subdirectory now, plus train/val list files written at the end.
!rm -r /content/ljspeech-sidon-48khz /content/ljspeech-sidon-24khz
!mkdir /content/ljspeech-sidon-48khz
!mkdir /content/ljspeech-sidon-48khz/wavs
!mkdir /content/ljspeech-sidon-24khz
!mkdir /content/ljspeech-sidon-24khz/wavs
from pathlib import Path
from tqdm import tqdm
import soundfile as sf
import librosa
# --- Main loop: restore every LJSpeech utterance and write both datasets ---
train_list, val_list = [], []
for line in tqdm(Path("/content/LJSpeech-1.1/metadata.csv").read_text().splitlines()):
    # metadata.csv columns: file id | raw text | normalized text.
    fid, _, text = line.split('|')
    # File ids look like "LJ001-0001"; the numeric prefix is the book id.
    book_id = int(fid.split('-')[0].removeprefix('LJ'))
    phoneme = g2p(text)[0]
    # Stylish-TTS list entry: filename|phonemes|speaker id|text (single speaker -> 0).
    line = f"{fid}.wav|{phoneme}|0|{text}"
    waveform, sr = sf.read(f"/content/LJSpeech-1.1/wavs/{fid}.wav")
    # denoise_speech returns the restored waveform resampled to 48 kHz.
    sr, waveform = denoise_speech((sr, waveform))
    waveform_24khz = librosa.resample(waveform, orig_sr=sr, target_sr=24_000)
    sf.write(f"/content/ljspeech-sidon-48khz/wavs/{fid}.wav", waveform, sr)
    sf.write(f"/content/ljspeech-sidon-24khz/wavs/{fid}.wav", waveform_24khz, 24_000)
    # Books 1-40 go to the training split, the remainder to validation.
    if book_id <= 40:
        train_list.append(line)
    else:
        val_list.append(line)
train_list = '\n'.join(train_list)
val_list = '\n'.join(val_list)
Path("/content/ljspeech-sidon-48khz/train-list.txt").write_text(train_list)
Path("/content/ljspeech-sidon-48khz/val-list.txt").write_text(val_list)
Path("/content/ljspeech-sidon-24khz/train-list.txt").write_text(train_list)
Path("/content/ljspeech-sidon-24khz/val-list.txt").write_text(val_list)