diff --git a/funasr/auto/auto_model.py b/funasr/auto/auto_model.py
index 56dd5b5e4..9bb9ce07d 100644
--- a/funasr/auto/auto_model.py
+++ b/funasr/auto/auto_model.py
@@ -41,7 +41,7 @@ def prepare_data_iterator(data_in, input_len=None, data_type=None, key=None):
     chars = string.ascii_letters + string.digits
     if isinstance(data_in, str) and data_in.startswith('http'): # url
         data_in = download_from_url(data_in)
-    pdb.set_trace()
+
     if isinstance(data_in, str) and os.path.exists(data_in): # wav_path; filelist: wav.scp, file.jsonl;text.txt;
         _, file_extension = os.path.splitext(data_in)
         file_extension = file_extension.lower()
diff --git a/funasr/utils/load_utils.py b/funasr/utils/load_utils.py
index ccb5670c2..644af2324 100644
--- a/funasr/utils/load_utils.py
+++ b/funasr/utils/load_utils.py
@@ -31,7 +31,7 @@ def load_audio_text_image_video(data_or_path_or_list, fs: int = 16000, audio_fs:
         return [load_audio_text_image_video(audio, fs=fs, audio_fs=audio_fs, data_type=data_type, **kwargs) for audio in data_or_path_or_list]
     if isinstance(data_or_path_or_list, str) and data_or_path_or_list.startswith('http'): # download url to local file
         data_or_path_or_list = download_from_url(data_or_path_or_list)
-    pdb.set_trace()
+
     if isinstance(data_or_path_or_list, str) and os.path.exists(data_or_path_or_list): # local file
         if data_type is None or data_type == "sound":
             data_or_path_or_list, audio_fs = torchaudio.load(data_or_path_or_list)
@@ -67,7 +67,7 @@ def load_audio_text_image_video(data_or_path_or_list, fs: int = 16000, audio_fs:
         else:
             pass
             # print(f"unsupport data type: {data_or_path_or_list}, return raw data")
-    pdb.set_trace()
+
     if audio_fs != fs and data_type != "text":
         resampler = torchaudio.transforms.Resample(audio_fs, fs)
         data_or_path_or_list = resampler(data_or_path_or_list[None, :])[0, :]