https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI

Google Colab

https://colab.research.google.com/

Commands for installation:

!apt-get -y install build-essential python3-dev ffmpeg
!pip3 install --upgrade setuptools wheel
!pip3 install --upgrade pip
!pip3 install faiss-gpu fairseq gradio ffmpeg ffmpeg-python praat-parselmouth pyworld numpy==1.23.5 numba==0.56.4 librosa==0.9.2
!apt -y install -qq aria2
!pip install mega.py --quiet
!pip install gdown --quiet
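
The numpy, numba, and librosa pins above have to agree with each other, so a quick import check right after installation can catch a broken environment early. An optional addition, not part of the original notebook:

import numpy, numba, librosa
print("numpy", numpy.__version__)      # expected 1.23.5
print("numba", numba.__version__)      # expected 0.56.4
print("librosa", librosa.__version__)  # expected 0.9.2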
import ipywidgets as widgets
from IPython.display import clear_output
import os, shutil
success = widgets.Button(description="\u2714 Done", disabled=True, button_style="success")

!rm -rf /content/Retrieval-based-Voice-Conversion-WebUI
!git clone https://github.com/777gt/Mangio-RVC-Fork.git Retrieval-based-Voice-Conversion-WebUI
!git clone https://github.com/maxrmorrison/torchcrepe.git
!mv torchcrepe/torchcrepe Retrieval-based-Voice-Conversion-WebUI/
!rm -rf torchcrepe 

%cd /content/Retrieval-based-Voice-Conversion-WebUI
!wget https://raw.githubusercontent.com/777gt/EVC/main/easy-infer2.py -O easy-infer.py
!mkdir -p pretrained weights

!mkdir -p /content/Retrieval-based-Voice-Conversion-WebUI/weights

# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP2-人声vocals+非人声instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/weights -o HP2-人声vocals+非人声instrumentals.pth
# !aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/uvr5_weights/HP5-主旋律人声vocals+其他instrumentals.pth -d /content/Retrieval-based-Voice-Conversion-WebUI/weights -o HP5-主旋律人声vocals+其他instrumentals.pth
!aria2c --console-log-level=error -c -x 16 -s 16 -k 1M https://huggingface.co/lj1995/VoiceConversionWebUI/resolve/main/hubert_base.pt -d /content/Retrieval-based-Voice-Conversion-WebUI -o hubert_base.pt
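
Optionally, confirm that hubert_base.pt actually arrived before continuing; a partial aria2c download only surfaces later as an inference error. A small check, not part of the original notebook:

import os
hubert_path = "/content/Retrieval-based-Voice-Conversion-WebUI/hubert_base.pt"
assert os.path.isfile(hubert_path), "hubert_base.pt is missing, re-run the aria2c command above"
print(f"hubert_base.pt: {os.path.getsize(hubert_path) / 1e6:.1f} MB")  # rough completeness check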

!pip install pyngrok==4.1.12 --quiet

dataset_map = {
"jungkook": "<https://drive.google.com/file/d/1i5CMlcnKfVEpzD5wvwjWytrwT2fT74g6/view?usp=share_link>",
"BENEE": "<https://drive.google.com/file/d/1nuUVTqg1_lja7JFctjd18W9hQ7rk7aDZ/view?usp=share_link>",
"shiloh": "<https://drive.google.com/file/d/12nk3OOEVdhWB6eBNYN5Gw0wbifIYx_fF/view?usp=share_link>",
"CHASEATLANTIC": "<https://drive.google.com/file/d/1-Ayoue7g4eOPiyc6wgCCWmIrkfldThJb/view?usp=share_link>",
}
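
The loop below handles three kinds of links: Google Drive URLs go through gdown, mega.nz URLs through mega.py, and anything else through wget. To pull in an extra voice model, add another entry to dataset_map before the loop runs; the URL below is only a placeholder:

# Hypothetical extra entry (placeholder URL): the zip should contain the model's
# .pth file and, optionally, its .index/.npy feature files.
dataset_map["my_model"] = "https://example.com/my_model.zip"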

from mega import Mega
import os
for MODEL, url in dataset_map.items():
  MODELEPOCH = ''
  MODELZIP = MODEL + '.zip'
  modelname_path='/content/zips/'+MODELZIP

  !mkdir -p /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODEL}
  !mkdir -p /content/zips/
  if "drive.google.com" in url:
    !gdown "{url}" --fuzzy -O {modelname_path}
  elif "mega.nz" in url:
    m = Mega()
    m.download_url(url, '/content/zips')
  else:
    !wget $url -O /content/zips/{MODELZIP}
  for filename in os.listdir("/content/zips"):
    if filename.endswith(".zip"):
      zip_file = os.path.join("/content/zips", filename)
      shutil.unpack_archive(zip_file, "/content/unzips", 'zip')
  # Move the extracted files into place: the epoch count is read from any G_####.pth
  # checkpoint name, .index/.npy feature files go to logs/<MODEL>, and the small
  # voice model .pth goes to weights/<MODEL>.pth.
  for root, dirs, files in os.walk('/content/unzips'):
    for file in files:
      if "G_" in file:
        MODELEPOCH = file.split("G_")[1].split(".")[0]
    if MODELEPOCH == '':
      MODELEPOCH = '404'
    for file in files:
      file_path = os.path.join(root, file)
      if file.endswith(".npy") or file.endswith(".index"):
        !mv {file_path} /content/Retrieval-based-Voice-Conversion-WebUI/logs/{MODEL}/
      elif "G_" not in file and "D_" not in file and file.endswith(".pth"):
        !mv {file_path} /content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODEL}.pth

  !rm -rf /content/unzips/
  !rm -rf /content/zips/
  clear_output()

# Inspect one of the downloaded models; "jungkook" is just the key used as an example here.
MODEL = "jungkook"
MODELPATH = f'/content/Retrieval-based-Voice-Conversion-WebUI/weights/{MODEL}.pth'
import torch
model_params = torch.load(MODELPATH, map_location='cpu')
for key in model_params.keys():
  if key == 'info':
    print('Epochs: ' + str(model_params[key]))
  if key == 'sr':
    print('Sample Rate: ' + str(model_params[key]))
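
Beyond 'info' and 'sr', an RVC weights file typically carries a few more top-level entries, such as the model config and an f0 flag. Dumping every key of the model_params dict loaded above shows exactly what a given export contains; this is an optional addition:

# List every top-level entry; 'weight' holds the actual tensors, so only its size is printed.
for key, value in model_params.items():
  if key == 'weight':
    print(f"weight: {len(value)} tensors")
  else:
    print(f"{key}: {value}")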

!python3 easy-infer.py --colab --pycmd python3
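
easy-infer.py starts the Gradio interface itself. If its built-in sharing link is not enough, the pyngrok==4.1.12 package installed earlier can tunnel the UI out of the Colab VM. A minimal sketch, assuming the WebUI listens on Gradio's default port 7860 and that you have an ngrok auth token; run it in its own cell before the launch command above, since that command blocks:

from pyngrok import ngrok

ngrok.set_auth_token("YOUR_NGROK_TOKEN")  # placeholder token
public_url = ngrok.connect(7860)          # forwards to the assumed Gradio port
print("WebUI available at:", public_url)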