Compare commits

...

27 Commits

Author SHA1 Message Date
Fangjun Kuang
9165e73a3b
Support torch 2.8.0 (#120) 2025-08-07 21:09:15 +08:00
Fangjun Kuang
8cda08e0c5
support torch 2.7.1 (#119) 2025-06-05 11:30:38 +08:00
Fangjun Kuang
6edd6a8370
support torch 2.7.0 (#118) 2025-06-05 10:47:36 +08:00
Fangjun Kuang
0ecdee6e88
Support Linux arm64 (#114) 2025-03-07 14:22:18 +08:00
Fangjun Kuang
3f79fbbd6d
Support torch 2.6.0 (#112) 2025-01-30 08:45:49 +08:00
Fangjun Kuang
c1aefb643e
Support numpy 2.0 and torch 2.5.1 (#110) 2024-10-30 12:45:45 +08:00
Fangjun Kuang
e485c5749b
Support torch 2.5.0 (#109) 2024-10-18 11:04:11 +08:00
Fangjun Kuang
e05540cfad
Allow all zero rows in mel computation matrix. (#107) 2024-09-14 12:02:20 +08:00
Fangjun Kuang
5f9602afab
support for torch 2.4.1 (#106) 2024-09-07 17:32:29 +08:00
Fangjun Kuang
a39a011de9
Update readme for how to install kaldifeat (#105) 2024-08-09 14:55:47 +08:00
Karel Vesely
eeefc87660
switch C++14 -> C++17, so it becomes compatible with Pytorch 2.4 (#103) 2024-08-01 06:45:02 +08:00
Fangjun Kuang
0355ed8ced
support torch2.4.0 (#102) 2024-07-25 15:17:44 +08:00
Fangjun Kuang
f36767ed1d
support torch 2.3.1 (#100) 2024-06-06 11:00:41 +08:00
Fangjun Kuang
40cc0a4a2c
Support torch 2.3.0 (#99) 2024-04-25 21:28:17 +08:00
Fangjun Kuang
843763fd05
support torch 2.2.2 (#98) 2024-04-25 20:49:43 +08:00
Fangjun Kuang
5dee672556
support torch 2.2.1 (#96) 2024-02-23 13:48:33 +08:00
Fangjun Kuang
2a8a993f15
Fix building wheels for torch 2.2.0. (#95)
See also https://github.com/pytorch/pytorch/issues/120020
2024-02-18 20:30:18 +08:00
Fangjun Kuang
2e042b356e
Support torch 2.2.0 (#94) 2024-02-10 21:27:19 +08:00
Fangjun Kuang
b75abef4c0
update install doc (#90) 2024-01-14 14:28:44 +08:00
Fangjun Kuang
4fd12b48b8
Support torch 2.1.2 (#89) 2023-12-21 21:43:52 +08:00
Dadoou
f834ad861d
Update whisper-fbank.h: correct Dim() function (#88) 2023-12-19 15:55:20 +08:00
Fangjun Kuang
352aa43ce3
support torch 2.1.1 (#85) 2023-11-26 17:04:52 +08:00
Fangjun Kuang
c1adbda9f0
Update README.md 2023-11-09 12:48:57 +08:00
Fangjun Kuang
2624da8275
support whisper v3 (#84) 2023-11-09 12:45:56 +08:00
Fangjun Kuang
20379449fc
Update readme to include whisper (#83) 2023-11-08 19:36:22 +08:00
Fangjun Kuang
01aed93b1b
Support computing features for whisper (#82) 2023-11-08 19:22:32 +08:00
Fangjun Kuang
7912c2f442
Fix building wheels for torch 2.1.x (#81) 2023-11-03 10:16:32 +08:00
52 changed files with 7787 additions and 290 deletions

View File

@ -35,7 +35,7 @@ jobs:
python-version: [3.8]
steps:
# refer to https://github.com/actions/checkout
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
@ -54,6 +54,7 @@ jobs:
ls -lh $KALDIFEAT_DIR
export GIT_LFS_SKIP_SMUDGE=1
export GIT_CLONE_PROTECTION_ACTIVE=false
git clone https://huggingface.co/csukuangfj/kaldifeat huggingface
cd huggingface
@ -68,6 +69,8 @@ jobs:
make html
cp source/cpu.html build/html/
cp source/cuda.html build/html/
cp source/cpu-cn.html build/html/
cp source/cuda-cn.html build/html/
touch build/html/.nojekyll
- name: Deploy

View File

@ -2,6 +2,9 @@ name: build-wheels-cpu-macos
on:
push:
branches:
# - wheel
- torch-2.8.0
tags:
- '*'
workflow_dispatch:
@ -17,48 +20,72 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Generating build matrix
id: set-matrix
run: |
# outputting for debugging purposes
python ./scripts/github_actions/generate_build_matrix.py --for-macos
MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py --for-macos)
# python ./scripts/github_actions/generate_build_matrix.py --for-macos
# MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py --for-macos)
python ./scripts/github_actions/generate_build_matrix.py --for-macos --test-only-latest-torch
MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py --for-macos --test-only-latest-torch)
echo "::set-output name=matrix::${MATRIX}"
build_wheels_macos_cpu:
needs: generate_build_matrix
name: ${{ matrix.torch }} ${{ matrix.python-version }}
runs-on: macos-latest
runs-on: macos-14
strategy:
fail-fast: false
matrix:
${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
# see https://cibuildwheel.readthedocs.io/en/stable/changelog/
# for a list of versions
- name: Build wheels
uses: pypa/cibuildwheel@v2.11.4
env:
CIBW_BEFORE_BUILD: pip install torch==${{ matrix.torch}} cmake numpy
CIBW_BUILD: ${{ matrix.python-version }}-*
CIBW_REPAIR_WHEEL_COMMAND_MACOS: ""
CIBW_BUILD_VERBOSITY: 3
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Display wheels
- name: Install dependencies
shell: bash
run: |
pip install -q torch==${{ matrix.torch}} cmake numpy wheel>=0.40.0 twine setuptools
- name: Build wheel
shell: bash
run: |
python3 setup.py bdist_wheel
mkdir wheelhouse
cp -v dist/* wheelhouse
- name: Display wheels (before fix)
shell: bash
run: |
ls -lh ./wheelhouse/
- name: Fix wheel platform tag
run: |
# See https://github.com/glencoesoftware/zeroc-ice-py-macos-x86_64/pull/3/files
# See:
# * https://github.com/pypa/wheel/issues/406
python -m wheel tags \
--platform-tag=macosx_11_0_arm64 \
--remove wheelhouse/*.whl
- name: Display wheels (after fix)
shell: bash
run: |
ls -lh ./wheelhouse/
- name: Upload Wheel
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v4
with:
name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-macos-latest-cpu
path: wheelhouse/*.whl
@ -84,8 +111,9 @@ jobs:
cd huggingface
git pull
mkdir -p macos
cp -v ../wheelhouse/*.whl ./macos
d=cpu/1.25.5.dev20241029/macos
mkdir -p $d
cp -v ../wheelhouse/*.whl ./$d
git status
git lfs track "*.whl"
git add .

View File

@ -26,7 +26,7 @@ jobs:
pypi:
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0

View File

@ -32,7 +32,7 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Generating build matrix
@ -45,14 +45,14 @@ jobs:
run_tests_macos_cpu:
needs: generate_build_matrix
runs-on: macos-10.15
runs-on: macos-latest
strategy:
fail-fast: false
matrix:
${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
@ -66,7 +66,7 @@ jobs:
run: |
python3 -m pip install -qq --upgrade pip
python3 -m pip install -qq wheel twine typing_extensions soundfile numpy
python3 -m pip install -qq torch==${{ matrix.torch }} -f https://download.pytorch.org/whl/torch_stable.html
python3 -m pip install -qq torch==${{ matrix.torch }} -f https://download.pytorch.org/whl/torch_stable.html || python3 -m pip install -qq torch==${{ matrix.torch }} -f https://download.pytorch.org/whl/torch/
python3 -c "import torch; print('torch version:', torch.__version__)"
@ -75,7 +75,7 @@ jobs:
run: |
mkdir build_release
cd build_release
cmake ..
cmake -DCMAKE_CXX_STANDARD=17 ..
make VERBOSE=1 -j3
- name: Run tests

View File

@ -32,7 +32,7 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Generating build matrix
@ -52,7 +52,7 @@ jobs:
${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
@ -61,12 +61,6 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
- name: Install GCC 7
run: |
sudo apt-get install -y gcc-7 g++-7
echo "CC=/usr/bin/gcc-7" >> $GITHUB_ENV
echo "CXX=/usr/bin/g++-7" >> $GITHUB_ENV
- name: Install PyTorch ${{ matrix.torch }}
shell: bash
run: |
@ -75,7 +69,7 @@ jobs:
python3 -m pip install --upgrade pip
python3 -m pip install wheel twine typing_extensions soundfile
python3 -m pip install bs4 requests tqdm numpy
python3 -m pip install -qq torch==${{ matrix.torch }}+cpu -f https://download.pytorch.org/whl/torch_stable.html
python3 -m pip install -qq torch==${{ matrix.torch }}+cpu -f https://download.pytorch.org/whl/torch_stable.html || python3 -m pip install -qq torch==${{ matrix.torch }}+cpu -f https://download.pytorch.org/whl/torch/
python3 -c "import torch; print('torch version:', torch.__version__)"
@ -84,7 +78,7 @@ jobs:
run: |
mkdir build_release
cd build_release
cmake ..
cmake -DCMAKE_CXX_STANDARD=17 ..
make VERBOSE=1 -j3
- name: Run tests

View File

@ -32,7 +32,7 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Generating build matrix
@ -45,14 +45,14 @@ jobs:
run_tests_ubuntu_cuda:
needs: generate_build_matrix
runs-on: ubuntu-18.04
runs-on: ubuntu-latest
strategy:
fail-fast: false
matrix:
${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
@ -61,14 +61,6 @@ jobs:
with:
python-version: ${{ matrix.python-version }}
- name: Install GCC 7
if: startsWith(matrix.os, 'ubuntu')
run: |
sudo apt-get install -y gcc-7 g++-7
echo "CC=/usr/bin/gcc-7" >> $GITHUB_ENV
echo "CXX=/usr/bin/g++-7" >> $GITHUB_ENV
- name: Install CUDA Toolkit ${{ matrix.cuda }}
shell: bash
env:
@ -110,7 +102,7 @@ jobs:
run: |
mkdir build_release
cd build_release
cmake ..
cmake -DCMAKE_CXX_STANDARD=17 ..
make VERBOSE=1 -j3
- name: Run tests

View File

@ -32,7 +32,7 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Generating build matrix
@ -46,14 +46,14 @@ jobs:
run_tests_windows_cpu:
# see https://github.com/actions/virtual-environments/blob/win19/20210525.0/images/win/Windows2019-Readme.md
needs: generate_build_matrix
runs-on: windows-2019
runs-on: windows-latest
strategy:
fail-fast: false
matrix:
${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
@ -71,7 +71,7 @@ jobs:
- name: Install PyTorch ${{ matrix.torch }}
run: |
pip3 install -qq torch==${{ matrix.torch }}+cpu -f https://download.pytorch.org/whl/torch_stable.html
pip3 install -qq torch==${{ matrix.torch }}+cpu -f https://download.pytorch.org/whl/torch_stable.html || pip3 install -qq torch==${{ matrix.torch }}+cpu -f https://download.pytorch.org/whl/torch/
pip3 install -qq wheel twine dataclasses numpy typing_extensions soundfile
- name: Display CMake version
@ -90,7 +90,7 @@ jobs:
- name: Build kaldifeat
run: |
cd build_release
cmake --build . --target _kaldifeat --config Release
cmake --build -DCMAKE_CXX_STANDARD=17 . --target _kaldifeat --config Release
- name: Display generated files
shell: bash
@ -107,7 +107,7 @@ jobs:
python3 -c "import kaldifeat; print(kaldifeat.__version__)"
- name: Upload Wheel
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v4
with:
name: python-${{ matrix.python-version }}-${{ matrix.os }}-cpu
path: dist/*.whl
@ -116,6 +116,6 @@ jobs:
shell: bash
run: |
cd build_release
cmake --build . --target ALL_BUILD --config Release
cmake -DCMAKE_CXX_STANDARD=17 --build . --target ALL_BUILD --config Release
ls -lh bin/*/*
ctest -C Release --verbose --output-on-failure

View File

@ -33,7 +33,7 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Generating build matrix
@ -46,14 +46,14 @@ jobs:
run_tests_windows_cuda:
needs: generate_build_matrix
runs-on: windows-2019
runs-on: windows-latest
strategy:
fail-fast: false
matrix:
${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
@ -113,7 +113,7 @@ jobs:
python3 -m pip install -qq --upgrade pip
python3 -m pip install -qq wheel twine numpy typing_extensions
python3 -m pip install -qq dataclasses soundfile numpy
python3 -m pip install -qq torch==${{ matrix.torch }}${v} -f https://download.pytorch.org/whl/torch_stable.html numpy
python3 -m pip install -qq torch==${{ matrix.torch }}${v} -f https://download.pytorch.org/whl/torch_stable.html numpy || python3 -m pip install -qq torch==${{ matrix.torch }}${v} -f https://download.pytorch.org/whl/torch/ numpy
python3 -c "import torch; print('torch version:', torch.__version__)"
@ -159,7 +159,7 @@ jobs:
python3 -c "import kaldifeat; print(kaldifeat.__version__)"
- name: Upload Wheel
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v4
with:
name: python-${{ matrix.python-version }}-${{ matrix.os }}-cuda-${{ matrix.cuda }}
path: dist/*.whl
@ -168,6 +168,6 @@ jobs:
shell: bash
run: |
cd build_release
cmake --build . --target ALL_BUILD --config Release
cmake -DCMAKE_CXX_STANDARD=17 --build . --target ALL_BUILD --config Release
ls -lh bin/*/*
ctest -C Release --verbose --output-on-failure

View File

@ -29,12 +29,12 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [ubuntu-18.04, macos-10.15]
python-version: [3.7, 3.8, 3.9]
os: [ubuntu-latest, macos-latest]
python-version: ["3.8"]
fail-fast: false
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0

View File

@ -20,7 +20,7 @@ jobs:
python-version: ["3.8", "3.9", "3.10"]
steps:
# refer to https://github.com/actions/checkout
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
@ -47,7 +47,7 @@ jobs:
if: startsWith(matrix.os, 'ubuntu') || startsWith(matrix.os, 'windows')
shell: bash
run: |
pip install torch==${{ github.event.inputs.torch_version }}+cpu -f https://download.pytorch.org/whl/torch_stable.html
pip install torch==${{ github.event.inputs.torch_version }}+cpu -f https://download.pytorch.org/whl/torch_stable.html || pip install torch==${{ github.event.inputs.torch_version }}+cpu -f https://download.pytorch.org/whl/torch/
- name: Install kaldifeat
shell: bash

View File

@ -0,0 +1,168 @@
name: build-wheels-cpu-arm64-ubuntu
on:
push:
branches:
# - wheel
- torch-2.8.0
tags:
- '*'
workflow_dispatch:
concurrency:
group: build-wheels-cpu-arm64-ubuntu-${{ github.ref }}
cancel-in-progress: true
jobs:
generate_build_matrix:
# see https://github.com/pytorch/pytorch/pull/50633
runs-on: ubuntu-latest
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Generating build matrix
id: set-matrix
run: |
# outputting for debugging purposes
# python ./scripts/github_actions/generate_build_matrix.py --for-arm64
# MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py --for-arm64)
python ./scripts/github_actions/generate_build_matrix.py --test-only-latest-torch --for-arm64
MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py --test-only-latest-torch --for-arm64)
echo "::set-output name=matrix::${MATRIX}"
build-manylinux-wheels:
needs: generate_build_matrix
name: ${{ matrix.torch }} ${{ matrix.python-version }}
runs-on: ubuntu-22.04-arm
strategy:
fail-fast: false
matrix:
${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}
steps:
- uses: actions/checkout@v4
with:
fetch-depth: 0
# see https://github.com/pytorch/test-infra/blob/9e3d392690719fac85bad0c9b67f530e48375ca1/tools/scripts/generate_binary_build_matrix.py
# https://github.com/pytorch/builder/tree/main/manywheel
# https://github.com/pytorch/builder/pull/476
# https://github.com/k2-fsa/k2/issues/733
# https://github.com/pytorch/pytorch/pull/50633 (generate build matrix)
- name: Run the build process with Docker
uses: addnab/docker-run-action@v3
with:
image: ${{ matrix.image }}
options: -v ${{ github.workspace }}:/var/www -e IS_2_28=${{ matrix.is_2_28 }} -e PYTHON_VERSION=${{ matrix.python-version }} -e TORCH_VERSION=${{ matrix.torch }}
run: |
echo "pwd: $PWD"
uname -a
id
cat /etc/*release
gcc --version
python3 --version
which python3
ls -lh /opt/python/
echo "---"
ls -lh /opt/python/cp*
ls -lh /opt/python/*/bin
echo "---"
find /opt/python/cp* -name "libpython*"
echo "-----"
find /opt/_internal/cp* -name "libpython*"
echo "-----"
find / -name "libpython*"
echo "----"
ls -lh /usr/lib64/libpython3.so
# cp36-cp36m
# cp37-cp37m
# cp38-cp38
# cp39-cp39
# cp310-cp310
# cp311-cp311
# cp312-cp312
# cp313-cp313
# cp313-cp313t (no gil)
if [[ $PYTHON_VERSION == "3.6" ]]; then
python_dir=/opt/python/cp36-cp36m
export PYTHONPATH=/opt/python/cp36-cp36m/lib/python3.6/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.7" ]]; then
python_dir=/opt/python/cp37-cp37m
export PYTHONPATH=/opt/python/cp37-cp37m/lib/python3.7/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.8" ]]; then
python_dir=/opt/python/cp38-cp38
export PYTHONPATH=/opt/python/cp38-cp38/lib/python3.8/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.9" ]]; then
python_dir=/opt/python/cp39-cp39
export PYTHONPATH=/opt/python/cp39-cp39/lib/python3.9/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.10" ]]; then
python_dir=/opt/python/cp310-cp310
export PYTHONPATH=/opt/python/cp310-cp310/lib/python3.10/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.11" ]]; then
python_dir=/opt/python/cp311-cp311
export PYTHONPATH=/opt/python/cp311-cp311/lib/python3.11/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.12" ]]; then
python_dir=/opt/python/cp312-cp312
export PYTHONPATH=/opt/python/cp312-cp312/lib/python3.12/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.13" ]]; then
python_dir=/opt/python/cp313-cp313
export PYTHONPATH=/opt/python/cp313-cp313/lib/python3.13/site-packages:$PYTHONPATH
else
echo "Unsupported Python version $PYTHON_VERSION"
exit 1
fi
export PYTHON_INSTALL_DIR=$python_dir
export PATH=$PYTHON_INSTALL_DIR/bin:$PATH
python3 --version
which python3
/var/www/scripts/github_actions/build-ubuntu-cpu-arm64.sh
- name: Display wheels
shell: bash
run: |
ls -lh ./wheelhouse/
# https://huggingface.co/docs/hub/spaces-github-actions
- name: Publish to huggingface
if: github.repository_owner == 'csukuangfj'
env:
HF_TOKEN: ${{ secrets.HF_TOKEN }}
uses: nick-fields/retry@v2
with:
max_attempts: 20
timeout_seconds: 200
shell: bash
command: |
git config --global user.email "csukuangfj@gmail.com"
git config --global user.name "Fangjun Kuang"
rm -rf huggingface
export GIT_LFS_SKIP_SMUDGE=1
git clone https://huggingface.co/csukuangfj/kaldifeat huggingface
cd huggingface
git pull
d=cpu/1.25.5.dev20250307/linux-arm64
mkdir -p $d
cp -v ../wheelhouse/*.whl ./$d
git status
git lfs track "*.whl"
git add .
git commit -m "upload ubuntu-arm64-cpu wheel for torch ${{ matrix.torch }} python ${{ matrix.python-version }}"
git push https://csukuangfj:$HF_TOKEN@huggingface.co/csukuangfj/kaldifeat main

View File

@ -2,6 +2,9 @@ name: build-wheels-cpu-ubuntu
on:
push:
branches:
# - wheel
- torch-2.8.0
tags:
- '*'
workflow_dispatch:
@ -17,15 +20,18 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Generating build matrix
id: set-matrix
run: |
# outputting for debugging purposes
python ./scripts/github_actions/generate_build_matrix.py
MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py)
# python ./scripts/github_actions/generate_build_matrix.py
# MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py)
python ./scripts/github_actions/generate_build_matrix.py --test-only-latest-torch
MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py --test-only-latest-torch)
echo "::set-output name=matrix::${MATRIX}"
build-manylinux-wheels:
@ -38,7 +44,7 @@ jobs:
${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
@ -51,7 +57,7 @@ jobs:
uses: addnab/docker-run-action@v3
with:
image: ${{ matrix.image }}
options: -v ${{ github.workspace }}:/var/www -e PYTHON_VERSION=${{ matrix.python-version }} -e TORCH_VERSION=${{ matrix.torch }}
options: -v ${{ github.workspace }}:/var/www -e IS_2_28=${{ matrix.is_2_28 }} -e PYTHON_VERSION=${{ matrix.python-version }} -e TORCH_VERSION=${{ matrix.torch }}
run: |
echo "pwd: $PWD"
uname -a
@ -60,6 +66,69 @@ jobs:
gcc --version
python3 --version
which python3
ls -lh /opt/python/
echo "---"
ls -lh /opt/python/cp*
ls -lh /opt/python/*/bin
echo "---"
find /opt/python/cp* -name "libpython*"
echo "-----"
find /opt/_internal/cp* -name "libpython*"
echo "-----"
find / -name "libpython*"
echo "----"
ls -lh /usr/lib64/libpython3.so || true
# cp36-cp36m
# cp37-cp37m
# cp38-cp38
# cp39-cp39
# cp310-cp310
# cp311-cp311
# cp312-cp312
# cp313-cp313
# cp313-cp313t (no gil)
if [[ $PYTHON_VERSION == "3.6" ]]; then
python_dir=/opt/python/cp36-cp36m
export PYTHONPATH=/opt/python/cp36-cp36m/lib/python3.6/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.7" ]]; then
python_dir=/opt/python/cp37-cp37m
export PYTHONPATH=/opt/python/cp37-cp37m/lib/python3.7/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.8" ]]; then
python_dir=/opt/python/cp38-cp38
export PYTHONPATH=/opt/python/cp38-cp38/lib/python3.8/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.9" ]]; then
python_dir=/opt/python/cp39-cp39
export PYTHONPATH=/opt/python/cp39-cp39/lib/python3.9/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.10" ]]; then
python_dir=/opt/python/cp310-cp310
export PYTHONPATH=/opt/python/cp310-cp310/lib/python3.10/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.11" ]]; then
python_dir=/opt/python/cp311-cp311
export PYTHONPATH=/opt/python/cp311-cp311/lib/python3.11/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.12" ]]; then
python_dir=/opt/python/cp312-cp312
export PYTHONPATH=/opt/python/cp312-cp312/lib/python3.12/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.13" ]]; then
python_dir=/opt/python/cp313-cp313
export PYTHONPATH=/opt/python/cp313-cp313/lib/python3.13/site-packages:$PYTHONPATH
else
echo "Unsupported Python version $PYTHON_VERSION"
exit 1
fi
export PYTHON_INSTALL_DIR=$python_dir
export PATH=$PYTHON_INSTALL_DIR/bin:$PATH
python3 --version
which python3
/var/www/scripts/github_actions/build-ubuntu-cpu.sh
- name: Display wheels
@ -67,12 +136,6 @@ jobs:
run: |
ls -lh ./wheelhouse/
- name: Upload Wheel
uses: actions/upload-artifact@v2
with:
name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cpu
path: wheelhouse/*.whl
# https://huggingface.co/docs/hub/spaces-github-actions
- name: Publish to huggingface
if: github.repository_owner == 'csukuangfj'
@ -95,8 +158,9 @@ jobs:
cd huggingface
git pull
mkdir -p ubuntu-cpu
cp -v ../wheelhouse/*.whl ./ubuntu-cpu
d=cpu/1.25.5.dev20250307/linux-x64
mkdir -p $d
cp -v ../wheelhouse/*.whl ./$d
git status
git lfs track "*.whl"
git add .

View File

@ -2,6 +2,9 @@ name: build-wheels-cuda-ubuntu
on:
push:
branches:
- wheel
# - torch-2.7.1
tags:
- '*'
workflow_dispatch:
@ -17,15 +20,18 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Generating build matrix
id: set-matrix
run: |
# outputting for debugging purposes
python ./scripts/github_actions/generate_build_matrix.py --enable-cuda
MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py --enable-cuda)
# python ./scripts/github_actions/generate_build_matrix.py --enable-cuda
# MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py --enable-cuda)
python ./scripts/github_actions/generate_build_matrix.py --enable-cuda --test-only-latest-torch
MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py --enable-cuda --test-only-latest-torch)
echo "::set-output name=matrix::${MATRIX}"
build-manylinux-wheels:
@ -38,10 +44,19 @@ jobs:
${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Free space
shell: bash
run: |
df -h
rm -rf /opt/hostedtoolcache
df -h
echo "pwd: $PWD"
echo "github.workspace ${{ github.workspace }}"
# see https://github.com/pytorch/test-infra/blob/9e3d392690719fac85bad0c9b67f530e48375ca1/tools/scripts/generate_binary_build_matrix.py
# https://github.com/pytorch/builder/tree/main/manywheel
# https://github.com/pytorch/builder/pull/476
@ -51,7 +66,7 @@ jobs:
uses: addnab/docker-run-action@v3
with:
image: ${{ matrix.image }}
options: -v ${{ github.workspace }}:/var/www -e PYTHON_VERSION=${{ matrix.python-version }} -e TORCH_VERSION=${{ matrix.torch }} -e CUDA_VERSION=${{ matrix.cuda }}
options: -v ${{ github.workspace }}:/var/www -e IS_2_28=${{ matrix.is_2_28 }} -e PYTHON_VERSION=${{ matrix.python-version }} -e TORCH_VERSION=${{ matrix.torch }} -e CUDA_VERSION=${{ matrix.cuda }}
run: |
echo "pwd: $PWD"
uname -a
@ -61,6 +76,69 @@ jobs:
python3 --version
which python3
ls -lh /opt/python/
echo "---"
ls -lh /opt/python/cp*
ls -lh /opt/python/*/bin
echo "---"
find /opt/python/cp* -name "libpython*"
echo "-----"
find /opt/_internal/cp* -name "libpython*"
echo "-----"
find / -name "libpython*"
# cp36-cp36m
# cp37-cp37m
# cp38-cp38
# cp39-cp39
# cp310-cp310
# cp311-cp311
# cp312-cp312
# cp313-cp313
# cp313-cp313t (no gil)
if [[ $PYTHON_VERSION == "3.6" ]]; then
python_dir=/opt/python/cp36-cp36m
export PYTHONPATH=/opt/python/cp36-cp36m/lib/python3.6/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.7" ]]; then
python_dir=/opt/python/cp37-cp37m
export PYTHONPATH=/opt/python/cp37-cp37m/lib/python3.7/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.8" ]]; then
python_dir=/opt/python/cp38-cp38
export PYTHONPATH=/opt/python/cp38-cp38/lib/python3.8/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.9" ]]; then
python_dir=/opt/python/cp39-cp39
export PYTHONPATH=/opt/python/cp39-cp39/lib/python3.9/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.10" ]]; then
python_dir=/opt/python/cp310-cp310
export PYTHONPATH=/opt/python/cp310-cp310/lib/python3.10/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.11" ]]; then
python_dir=/opt/python/cp311-cp311
export PYTHONPATH=/opt/python/cp311-cp311/lib/python3.11/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.12" ]]; then
python_dir=/opt/python/cp312-cp312
export PYTHONPATH=/opt/python/cp312-cp312/lib/python3.12/site-packages:$PYTHONPATH
elif [[ $PYTHON_VERSION == "3.13" ]]; then
python_dir=/opt/python/cp313-cp313
export PYTHONPATH=/opt/python/cp313-cp313/lib/python3.13/site-packages:$PYTHONPATH
else
echo "Unsupported Python version $PYTHON_VERSION"
exit 1
fi
export PYTHON_INSTALL_DIR=$python_dir
export PATH=$PYTHON_INSTALL_DIR/bin:$PATH
# There are no libpython.so inside $PYTHON_INSTALL_DIR
# since they are statically linked.
python3 --version
which python3
pushd /usr/local
rm cuda
ln -s cuda-$CUDA_VERSION cuda
@ -79,9 +157,10 @@ jobs:
ls -lh ./wheelhouse/
- name: Upload Wheel
uses: actions/upload-artifact@v2
if: false
uses: actions/upload-artifact@v4
with:
name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cuda
name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-ubuntu-latest-cuda-is_2_28-${{ matrix.is_2_28 }}
path: wheelhouse/*.whl
# https://huggingface.co/docs/hub/spaces-github-actions
@ -105,8 +184,9 @@ jobs:
cd huggingface
git pull
mkdir -p ubuntu-cuda
cp -v ../wheelhouse/*.whl ./ubuntu-cuda
d=cuda/1.25.5.dev20241029/linux
mkdir -p $d
cp -v ../wheelhouse/*.whl ./$d
git status
git lfs track "*.whl"
git add .

View File

@ -2,6 +2,9 @@ name: build-wheels-cpu-win64
on:
push:
branches:
# - wheel
- torch-2.8.0
tags:
- '*'
workflow_dispatch:
@ -17,15 +20,18 @@ jobs:
outputs:
matrix: ${{ steps.set-matrix.outputs.matrix }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Generating build matrix
id: set-matrix
run: |
# outputting for debugging purposes
python ./scripts/github_actions/generate_build_matrix.py --for-windows
MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py --for-windows)
# python ./scripts/github_actions/generate_build_matrix.py --for-windows
# MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py --for-windows)
python ./scripts/github_actions/generate_build_matrix.py --for-windows --test-only-latest-torch
MATRIX=$(python ./scripts/github_actions/generate_build_matrix.py --for-windows --test-only-latest-torch)
echo "::set-output name=matrix::${MATRIX}"
build_wheels_win64_cpu:
@ -38,19 +44,27 @@ jobs:
${{ fromJson(needs.generate_build_matrix.outputs.matrix) }}
steps:
- uses: actions/checkout@v2
- uses: actions/checkout@v4
with:
fetch-depth: 0
# see https://cibuildwheel.readthedocs.io/en/stable/changelog/
# for a list of versions
- name: Build wheels
uses: pypa/cibuildwheel@v2.11.4
env:
CIBW_BEFORE_BUILD: pip install torch==${{ matrix.torch}}+cpu -f https://download.pytorch.org/whl/torch_stable.html cmake numpy
CIBW_BUILD: ${{ matrix.python-version }}-win_amd64
CIBW_REPAIR_WHEEL_COMMAND_WINDOWS: ""
CIBW_BUILD_VERBOSITY: 3
- name: Setup Python
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
shell: bash
run: |
pip install -q torch==${{ matrix.torch}} cmake numpy wheel>=0.40.0 twine setuptools
pip install torch==${{ matrix.torch}}+cpu -f https://download.pytorch.org/whl/torch_stable.html cmake numpy || pip install torch==${{ matrix.torch}}+cpu -f https://download.pytorch.org/whl/torch/ cmake numpy
- name: Build wheel
shell: bash
run: |
python3 setup.py bdist_wheel
mkdir wheelhouse
cp -v dist/* wheelhouse
- name: Display wheels
shell: bash
@ -58,7 +72,7 @@ jobs:
ls -lh ./wheelhouse/
- name: Upload Wheel
uses: actions/upload-artifact@v2
uses: actions/upload-artifact@v4
with:
name: torch-${{ matrix.torch }}-python-${{ matrix.python-version }}-windows-latest-cpu
path: wheelhouse/*.whl
@ -84,8 +98,9 @@ jobs:
cd huggingface
git pull
mkdir -p windows-cpu
cp -v ../wheelhouse/*.whl ./windows-cpu
d=cpu/1.25.5.dev20241029/windows
mkdir -p $d
cp -v ../wheelhouse/*.whl ./$d
git status
git lfs track "*.whl"
git add .

2
.gitignore vendored
View File

@ -6,3 +6,5 @@ __pycache__/
test-1hour.wav
path.sh
torch_version.py
cpu*.html
cuda*.html

View File

@ -1,4 +1,7 @@
# Copyright (c) 2021 Xiaomi Corporation (author: Fangjun Kuang)
if (CMAKE_VERSION VERSION_GREATER_EQUAL "4.0.0")
set(CMAKE_POLICY_VERSION_MINIMUM 3.5)
endif()
cmake_minimum_required(VERSION 3.8 FATAL_ERROR)
@ -7,7 +10,7 @@ project(kaldifeat)
# remember to change the version in
# scripts/conda/kaldifeat/meta.yaml
# scripts/conda-cpu/kaldifeat/meta.yaml
set(kaldifeat_VERSION "1.25.1")
set(kaldifeat_VERSION "1.25.5")
set(CMAKE_ARCHIVE_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
set(CMAKE_LIBRARY_OUTPUT_DIRECTORY "${CMAKE_BINARY_DIR}/lib")
@ -17,6 +20,7 @@ set(CMAKE_SKIP_BUILD_RPATH FALSE)
set(BUILD_RPATH_USE_ORIGIN TRUE)
set(CMAKE_INSTALL_RPATH_USE_LINK_PATH TRUE)
if(NOT APPLE)
set(kaldifeat_rpath_origin "$ORIGIN")
else()
@ -32,16 +36,17 @@ if(NOT CMAKE_BUILD_TYPE)
set(CMAKE_BUILD_TYPE Release)
endif()
set(CMAKE_CXX_STANDARD 14 CACHE STRING "The C++ version to be used.")
set(CMAKE_CXX_EXTENSIONS OFF)
if (NOT CMAKE_CXX_STANDARD)
set(CMAKE_CXX_STANDARD 17 CACHE STRING "The C++ version to be used.")
endif()
message(STATUS "C++ Standard version: ${CMAKE_CXX_STANDARD}")
set(CMAKE_CXX_EXTENSIONS OFF)
list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake/Modules)
list(APPEND CMAKE_MODULE_PATH ${CMAKE_CURRENT_SOURCE_DIR}/cmake)
option(BUILD_SHARED_LIBS "Whether to build shared libraries" ON)
option(kaldifeat_BUILD_TESTS "Whether to build tests or not" ON)
option(kaldifeat_BUILD_TESTS "Whether to build tests or not" OFF)
option(kaldifeat_BUILD_PYMODULE "Whether to build python module or not" ON)
message(STATUS "BUILD_SHARED_LIBS ${BUILD_SHARED_LIBS}")

View File

@ -14,6 +14,20 @@
**Note**: If you are looking for a version that does not depend on PyTorch,
please see <https://github.com/csukuangfj/kaldi-native-fbank>
# Installation
Refer to
<https://csukuangfj.github.io/kaldifeat/installation/from_wheels.html>
for installation.
> Never use `pip install kaldifeat`
> Never use `pip install kaldifeat`
> Never use `pip install kaldifeat`
<sub>
<table>
<tr>
@ -23,6 +37,36 @@ please see <https://github.com/csukuangfj/kaldi-native-fbank>
<th>Usage</th>
</tr>
<tr>
<td>Fbank for <a href="https://github.com/openai/whisper">Whisper</a></td>
<td><code>kaldifeat.WhisperFbankOptions</code></td>
<td><code>kaldifeat.WhisperFbank</code></td>
<td>
<pre lang="python">
opts = kaldifeat.WhisperFbankOptions()
opts.device = torch.device('cuda', 0)
fbank = kaldifeat.WhisperFbank(opts)
features = fbank(wave)
</pre>
See <a href="https://github.com/csukuangfj/kaldifeat/pull/82">#82</a>
</td>
</tr>
<tr>
<td>Fbank for <a href="https://github.com/openai/whisper">Whisper-V3</a></td>
<td><code>kaldifeat.WhisperFbankOptions</code></td>
<td><code>kaldifeat.WhisperFbank</code></td>
<td>
<pre lang="python">
opts = kaldifeat.WhisperFbankOptions()
opts.num_mels = 128
opts.device = torch.device('cuda', 0)
fbank = kaldifeat.WhisperFbank(opts)
features = fbank(wave)
</pre>
</td>
</tr>
<tr>
<td>FBANK</td>
<td><code>kaldifeat.FbankOptions</code></td>
@ -282,8 +326,4 @@ extraction.
See <https://github.com/k2-fsa/sherpa/blob/master/sherpa/bin/pruned_stateless_emformer_rnnt2/decode.py>
# Installation
Refer to
<https://csukuangfj.github.io/kaldifeat>
for installation.

View File

@ -8,9 +8,15 @@ import sys
from pathlib import Path
import setuptools
import torch
from setuptools.command.build_ext import build_ext
def get_pytorch_version():
# if it is 1.7.1+cuda101, then strip +cuda101
return torch.__version__.split("+")[0]
def is_for_pypi():
ans = os.environ.get("KALDIFEAT_IS_FOR_PYPI", None)
return ans is not None
@ -39,7 +45,6 @@ try:
# -linux_x86_64.whl
self.root_is_pure = False
except ImportError:
bdist_wheel = None
@ -70,6 +75,13 @@ class BuildExtension(build_ext):
extra_cmake_args = " -Dkaldifeat_BUILD_TESTS=OFF "
extra_cmake_args += f" -DCMAKE_INSTALL_PREFIX={Path(self.build_lib).resolve()}/kaldifeat " # noqa
major, minor = get_pytorch_version().split(".")[:2]
print("major, minor", major, minor)
major = int(major)
minor = int(minor)
if major > 2 or (major == 2 and minor >= 1):
extra_cmake_args += f" -DCMAKE_CXX_STANDARD=17 "
if "PYTHON_EXECUTABLE" not in cmake_args:
print(f"Setting PYTHON_EXECUTABLE to {sys.executable}")
cmake_args += f" -DPYTHON_EXECUTABLE={sys.executable}"
@ -103,9 +115,7 @@ class BuildExtension(build_ext):
else:
if make_args == "" and system_make_args == "":
print("For fast compilation, run:")
print(
'export KALDIFEAT_MAKE_ARGS="-j"; python setup.py install'
)
print('export KALDIFEAT_MAKE_ARGS="-j"; python setup.py install')
make_args = " -j4 "
print("Setting make_args to '-j4'")

View File

@ -8,19 +8,18 @@ function(download_pybind11)
include(FetchContent)
# latest commit as of 2022.10.31 that supports python 3.11
set(pybind11_URL "https://github.com/pybind/pybind11/archive/5bc0943ed96836f46489f53961f6c438d2935357.zip")
set(pybind11_URL2 "https://huggingface.co/csukuangfj/k2-cmake-deps/resolve/main/pybind11-5bc0943ed96836f46489f53961f6c438d2935357.zip")
set(pybind11_HASH "SHA256=ff65a1a8c9e6ceec11e7ed9d296f2e22a63e9ff0c4264b3af29c72b4f18f25a0")
set(pybind11_URL "https://github.com/pybind/pybind11/archive/refs/tags/v2.12.0.tar.gz")
set(pybind11_URL2 "https://hf-mirror.com/csukuangfj/sherpa-onnx-cmake-deps/resolve/main/pybind11-2.12.0.tar.gz")
set(pybind11_HASH "SHA256=bf8f242abd1abcd375d516a7067490fb71abd79519a282d22b6e4d19282185a7")
# If you don't have access to the Internet,
# please pre-download pybind11
set(possible_file_locations
$ENV{HOME}/Downloads/pybind11-5bc0943ed96836f46489f53961f6c438d2935357.zip
${PROJECT_SOURCE_DIR}/pybind11-5bc0943ed96836f46489f53961f6c438d2935357.zip
${PROJECT_BINARY_DIR}/pybind11-5bc0943ed96836f46489f53961f6c438d2935357.zip
/tmp/pybind11-5bc0943ed96836f46489f53961f6c438d2935357.zip
/star-fj/fangjun/download/github/pybind11-5bc0943ed96836f46489f53961f6c438d2935357.zip
$ENV{HOME}/Downloads/pybind11-2.12.0.tar.gz
${CMAKE_SOURCE_DIR}/pybind11-2.12.0.tar.gz
${CMAKE_BINARY_DIR}/pybind11-2.12.0.tar.gz
/tmp/pybind11-2.12.0.tar.gz
/star-fj/fangjun/download/github/pybind11-2.12.0.tar.gz
)
foreach(f IN LISTS possible_file_locations)

View File

@ -9,6 +9,11 @@ You can find pre-compiled wheels at
We give a few examples below to show you how to install `kaldifeat`_ from
pre-compiled wheels.
.. hint::
The following lists only some examples. We suggest that you always select the
latest version of ``kaldifeat``.
Linux (CPU)
-----------
@ -16,21 +21,29 @@ Suppose you want to install the following wheel:
.. code-block:: bash
https://huggingface.co/csukuangfj/kaldifeat/resolve/main/ubuntu-cpu/kaldifeat-1.25.0.dev20230726+cpu.torch2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
https://huggingface.co/csukuangfj/kaldifeat/resolve/main/ubuntu-cpu/kaldifeat-1.25.4.dev20240725+cpu.torch2.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
you can use one of the following methods:
.. code-block:: bash
# method 1
pip install torch==2.0.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
pip install kaldifeat==1.25.0.dev20230726+cpu.torch2.0.1 -f https://csukuangfj.github.io/kaldifeat/cpu.html
pip install torch==2.4.0+cpu -f https://download.pytorch.org/whl/torch/
pip install kaldifeat==1.25.4.dev20240725+cpu.torch2.4.0 -f https://csukuangfj.github.io/kaldifeat/cpu.html
# For users from China
# 中国国内用户,如果访问不了 huggingface, 请使用
# pip install kaldifeat==1.25.4.dev20240725+cpu.torch2.4.0 -f https://csukuangfj.github.io/kaldifeat/cpu-cn.html
# method 2
pip install torch==2.0.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
wget https://huggingface.co/csukuangfj/kaldifeat/resolve/main/ubuntu-cpu/kaldifeat-1.25.0.dev20230726+cpu.torch2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
pip install ./kaldifeat-1.25.0.dev20230726+cpu.torch2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
pip install torch==2.4.0+cpu -f https://download.pytorch.org/whl/torch/
wget https://huggingface.co/csukuangfj/kaldifeat/resolve/main/ubuntu-cpu/kaldifeat-1.25.4.dev20240725+cpu.torch2.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
# For users from China
# 中国国内用户,如果访问不了 huggingface, 请使用
# wget https://hf-mirror.com/csukuangfj/kaldifeat/resolve/main/ubuntu-cpu/kaldifeat-1.25.4.dev20240725+cpu.torch2.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
pip install ./kaldifeat-1.25.4.dev20240725+cpu.torch2.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
Windows (CPU)
--------------
@ -39,20 +52,29 @@ Suppose you want to install the following wheel:
.. code-block:: bash
https://huggingface.co/csukuangfj/kaldifeat/resolve/main/windows-cpu/kaldifeat-1.25.0.dev20230726+cpu.torch2.0.1-cp311-cp311-win_amd64.whl
https://huggingface.co/csukuangfj/kaldifeat/resolve/main/windows-cpu/kaldifeat-1.25.4.dev20240725+cpu.torch2.4.0-cp312-cp312-win_amd64.whl
you can use one of the following methods:
.. code-block:: bash
# method 1
pip install torch==2.0.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
pip install kaldifeat==1.25.0.dev20230726+cpu.torch2.0.1 -f https://csukuangfj.github.io/kaldifeat/cpu.html
pip install torch==2.4.0+cpu -f https://download.pytorch.org/whl/torch/
pip install kaldifeat==1.25.4.dev20240725+cpu.torch2.4.0 -f https://csukuangfj.github.io/kaldifeat/cpu.html
# For users from China
# 中国国内用户,如果访问不了 huggingface, 请使用
# pip install kaldifeat==1.25.4.dev20240725+cpu.torch2.4.0 -f https://csukuangfj.github.io/kaldifeat/cpu-cn.html
# method 2
pip install torch==2.0.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
wget https://huggingface.co/csukuangfj/kaldifeat/resolve/main/windows-cpu/kaldifeat-1.25.0.dev20230726+cpu.torch2.0.1-cp311-cp311-win_amd64.whl
pip install ./kaldifeat-1.25.0.dev20230726+cpu.torch2.0.1-cp311-cp311-win_amd64.whl
pip install torch==2.4.0+cpu -f https://download.pytorch.org/whl/torch/
wget https://huggingface.co/csukuangfj/kaldifeat/resolve/main/windows-cpu/kaldifeat-1.25.4.dev20240725+cpu.torch2.4.0-cp312-cp312-win_amd64.whl
# For users from China
# 中国国内用户,如果访问不了 huggingface, 请使用
# wget https://hf-mirror.com/csukuangfj/kaldifeat/resolve/main/windows-cpu/kaldifeat-1.25.4.dev20240725+cpu.torch2.4.0-cp312-cp312-win_amd64.whl
pip install ./kaldifeat-1.25.4.dev20240725+cpu.torch2.4.0-cp312-cp312-win_amd64.whl
macOS (CPU)
-----------
@ -61,20 +83,29 @@ Suppose you want to install the following wheel:
.. code-block:: bash
https://huggingface.co/csukuangfj/kaldifeat/resolve/main/macos/kaldifeat-1.25.0.dev20230726+cpu.torch2.0.1-cp311-cp311-macosx_10_9_x86_64.whl
https://huggingface.co/csukuangfj/kaldifeat/resolve/main/macos/kaldifeat-1.25.4.dev20240725+cpu.torch2.4.0-cp311-cp311-macosx_11_0_arm64.whl
you can use one of the following methods:
.. code-block:: bash
# method 1
pip install torch==2.0.1
pip install kaldifeat==1.25.0.dev20230726+cpu.torch2.0.1 -f https://csukuangfj.github.io/kaldifeat/cpu.html
pip install torch==2.4.0
pip install kaldifeat==1.25.4.dev20240725+cpu.torch2.4.0 -f https://csukuangfj.github.io/kaldifeat/cpu.html
# For users from China
# 中国国内用户,如果访问不了 huggingface, 请使用
# pip install kaldifeat==1.25.4.dev20240725+cpu.torch2.4.0 -f https://csukuangfj.github.io/kaldifeat/cpu-cn.html
# method 2
pip install torch==2.0.1+cpu -f https://download.pytorch.org/whl/torch_stable.html
wget https://huggingface.co/csukuangfj/kaldifeat/resolve/main/macos/kaldifeat-1.25.0.dev20230726+cpu.torch2.0.1-cp311-cp311-macosx_10_9_x86_64.whl
pip install ./kaldifeat-1.25.0.dev20230726+cpu.torch2.0.1-cp311-cp311-macosx_10_9_x86_64.whl
pip install torch==2.4.0 -f https://download.pytorch.org/whl/torch/
wget https://huggingface.co/csukuangfj/kaldifeat/resolve/main/macos/kaldifeat-1.25.4.dev20240725+cpu.torch2.4.0-cp311-cp311-macosx_11_0_arm64.whl
# For users from China
# 中国国内用户,如果访问不了 huggingface, 请使用
# wget https://hf-mirror.com/csukuangfj/kaldifeat/resolve/main/macos/kaldifeat-1.25.4.dev20240725+cpu.torch2.4.0-cp311-cp311-macosx_11_0_arm64.whl
pip install ./kaldifeat-1.25.4.dev20240725+cpu.torch2.4.0-cp311-cp311-macosx_11_0_arm64.whl
Linux (CUDA)
------------
@ -83,17 +114,26 @@ Suppose you want to install the following wheel:
.. code-block:: bash
https://huggingface.co/csukuangfj/kaldifeat/resolve/main/ubuntu-cuda/kaldifeat-1.25.0.dev20230726+cuda11.8.torch2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
https://huggingface.co/csukuangfj/kaldifeat/resolve/main/ubuntu-cuda/kaldifeat-1.25.4.dev20240725+cuda12.4.torch2.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
you can use one of the following methods:
.. code-block:: bash
# method 1
pip install torch==2.0.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html
pip install kaldifeat==1.25.0.dev20230726+cuda11.8.torch2.0.1 -f https://csukuangfj.github.io/kaldifeat/cuda.html
pip install torch==2.4.0+cu124 -f https://download.pytorch.org/whl/torch/
pip install kaldifeat==1.25.4.dev20240725+cuda12.4.torch2.4.0 -f https://csukuangfj.github.io/kaldifeat/cuda.html
# For users from China
# 中国国内用户,如果访问不了 huggingface, 请使用
# pip install kaldifeat==1.25.4.dev20240725+cuda12.4.torch2.4.0 -f https://csukuangfj.github.io/kaldifeat/cuda-cn.html
# method 2
pip install torch==2.0.1+cu118 -f https://download.pytorch.org/whl/torch_stable.html
wget https://huggingface.co/csukuangfj/kaldifeat/resolve/main/ubuntu-cuda/kaldifeat-1.25.0.dev20230724+cuda11.8.torch2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
pip install ./kaldifeat-1.25.0.dev20230726+cuda11.8.torch2.0.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
pip install torch==2.4.0+cu124 -f https://download.pytorch.org/whl/torch/
wget https://huggingface.co/csukuangfj/kaldifeat/resolve/main/ubuntu-cuda/kaldifeat-1.25.4.dev20240725+cuda12.4.torch2.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
# For users from China
# 中国国内用户,如果访问不了 huggingface, 请使用
# wget https://hf-mirror.com/csukuangfj/kaldifeat/resolve/main/ubuntu-cuda/kaldifeat-1.25.4.dev20240725+cuda12.4.torch2.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl
pip install ./kaldifeat-1.25.4.dev20240725+cuda12.4.torch2.4.0-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl

View File

@ -10,6 +10,7 @@ set(kaldifeat_srcs
matrix-functions.cc
mel-computations.cc
online-feature.cc
whisper-fbank.cc
)
add_library(kaldifeat_core ${kaldifeat_srcs})

View File

@ -0,0 +1 @@
exclude_files=whisper-mel-bank.h,whisper-v3-mel-bank.h

View File

@ -65,7 +65,7 @@ torch::Tensor FbankComputer::Compute(torch::Tensor signal_raw_log_energy,
// note spectrum is in magnitude, not power, because of `abs()`
#if defined(KALDIFEAT_HAS_FFT_NAMESPACE)
// signal_frame shape: [x, 512]
// spectrum shape [x, 257
// spectrum shape [x, 257]
torch::Tensor spectrum = torch::fft::rfft(signal_frame).abs();
#else
// signal_frame shape [x, 512]

View File

@ -29,6 +29,13 @@ FeatureWindowFunction::FeatureWindowFunction(const FrameExtractionOptions &opts,
float *window_data = window.data_ptr<float>();
double a = M_2PI / (frame_length - 1);
if (opts.window_type == "hann") {
// see https://pytorch.org/docs/stable/generated/torch.hann_window.html
// We assume periodic is true
a = M_2PI / frame_length;
}
for (int32_t i = 0; i < frame_length; i++) {
double i_fl = static_cast<double>(i);
if (opts.window_type == "hanning") {
@ -39,6 +46,8 @@ FeatureWindowFunction::FeatureWindowFunction(const FrameExtractionOptions &opts,
window_data[i] = sin(0.5 * a * i_fl);
} else if (opts.window_type == "hamming") {
window_data[i] = 0.54 - 0.46 * cos(a * i_fl);
} else if (opts.window_type == "hann") {
window_data[i] = 0.50 - 0.50 * cos(a * i_fl);
} else if (opts.window_type ==
"povey") { // like hamming but goes to zero at edges.
window_data[i] = pow(0.5 - 0.5 * cos(a * i_fl), 0.85);

View File

@ -0,0 +1,39 @@
#!/usr/bin/env python3
# Copyright (c) 2023 Xiaomi Corporation (authors: Fangjun Kuang)
import librosa
import numpy as np
def main():
m = librosa.filters.mel(sr=16000, n_fft=400, n_mels=128)
assert m.shape == (128, 201)
s = "// Auto-generated. Do NOT edit!\n\n"
s += "// Copyright (c) 2023 Xiaomi Corporation (authors: Fangjun Kuang)\n\n"
s += "\n"
s += "#ifndef KALDIFEAT_CSRC_WHISPER_V3_MEL_BANK_H_\n"
s += "#define KALDIFEAT_CSRC_WHISPER_V3_MEL_BANK_H_\n"
s += "namespace kaldifeat {\n\n"
s += f"constexpr int32_t kWhisperV3MelRows = {m.shape[0]};\n"
s += f"constexpr int32_t kWhisperV3MelCols = {m.shape[1]};\n"
s += "\n"
s += "constexpr float kWhisperV3MelArray[] = {\n"
sep = ""
for i, f in enumerate(m.reshape(-1).tolist()):
s += f"{sep}{f:.8f}"
sep = ", "
if i and i % 7 == 0:
s += ",\n"
sep = ""
s += "};\n\n"
s += "} // namespace kaldifeat\n\n"
s += "#endif // KALDIFEAT_CSRC_WHISPER_V3_MEL_BANK_H_\n"
with open("whisper-v3-mel-bank.h", "w") as f:
f.write(s)
if __name__ == "__main__":
main()

View File

@ -0,0 +1,39 @@
#!/usr/bin/env python3
# Copyright (c) 2023 Xiaomi Corporation (authors: Fangjun Kuang)
import librosa
import numpy as np
def main():
m = librosa.filters.mel(sr=16000, n_fft=400, n_mels=80)
assert m.shape == (80, 201)
s = "// Auto-generated. Do NOT edit!\n\n"
s += "// Copyright (c) 2023 Xiaomi Corporation (authors: Fangjun Kuang)\n\n"
s += "\n"
s += "#ifndef KALDIFEAT_CSRC_WHISPER_MEL_BANK_H_\n"
s += "#define KALDIFEAT_CSRC_WHISPER_MEL_BANK_H_\n"
s += "namespace kaldifeat {\n\n"
s += f"constexpr int32_t kWhisperMelRows = {m.shape[0]};\n"
s += f"constexpr int32_t kWhisperMelCols = {m.shape[1]};\n"
s += "\n"
s += "constexpr float kWhisperMelArray[] = {\n"
sep = ""
for i, f in enumerate(m.reshape(-1).tolist()):
s += f"{sep}{f:.8f}"
sep = ", "
if i and i % 7 == 0:
s += ",\n"
sep = ""
s += "};\n\n"
s += "} // namespace kaldifeat\n\n"
s += "#endif // KALDIFEAT_CSRC_WHISPER_MEL_BANK_H_\n"
with open("whisper-mel-bank.h", "w") as f:
f.write(s)
if __name__ == "__main__":
main()

View File

@ -138,7 +138,7 @@ MelBanks::MelBanks(const MelBanksOptions &opts,
<< " and vtln-high " << vtln_high << ", versus "
<< "low-freq " << low_freq << " and high-freq " << high_freq;
// we will transpose bins_mat_ at the end of this funciton
// we will transpose bins_mat_ at the end of this function
bins_mat_ = torch::zeros({num_bins, num_fft_bins}, torch::kFloat);
int32_t stride = bins_mat_.strides()[0];
@ -179,12 +179,14 @@ MelBanks::MelBanks(const MelBanksOptions &opts,
last_index = i;
}
}
KALDIFEAT_ASSERT(first_index != -1 && last_index >= first_index &&
"You may have set num_mel_bins too large.");
// Note: It is possible that first_index == last_index == -1 at this line.
// Replicate a bug in HTK, for testing purposes.
if (opts.htk_mode && bin == 0 && mel_low_freq != 0.0f)
if (opts.htk_mode && bin == 0 && mel_low_freq != 0.0f &&
first_index != -1) {
this_bin[first_index] = 0.0f;
}
}
if (debug_) KALDIFEAT_LOG << bins_mat_;
@ -196,6 +198,15 @@ MelBanks::MelBanks(const MelBanksOptions &opts,
}
}
MelBanks::MelBanks(const float *weights, int32_t num_rows, int32_t num_cols,
torch::Device device)
: debug_(false), htk_mode_(false) {
bins_mat_ = torch::from_blob(const_cast<float *>(weights),
{num_rows, num_cols}, torch::kFloat)
.t()
.to(device);
}
torch::Tensor MelBanks::Compute(const torch::Tensor &spectrum) const {
return torch::mm(spectrum, bins_mat_);
}

View File

@ -76,6 +76,17 @@ class MelBanks {
const FrameExtractionOptions &frame_opts, float vtln_warp_factor,
torch::Device device);
// Initialize with a 2-d weights matrix
//
// Note: This constructor is for Whisper. It does not initialize
// center_freqs_.
//
// @param weights Pointer to the start address of the matrix
// @param num_rows It equals to number of mel bins
// @param num_cols It equals to (number of fft bins)/2+1
MelBanks(const float *weights, int32_t num_rows, int32_t num_cols,
torch::Device device);
// CAUTION: we save a transposed version of bins_mat_, so return size(1) here
int32_t NumBins() const { return static_cast<int32_t>(bins_mat_.size(1)); }
@ -89,7 +100,8 @@ class MelBanks {
private:
// A 2-D matrix. Its shape is NOT [num_bins, num_fft_bins]
// Its shape is [num_fft_bins, num_bins].
// Its shape is [num_fft_bins, num_bins] for non-whisper.
// For whisper, its shape is [num_fft_bins/2+1, num_bins]
torch::Tensor bins_mat_;
// center frequencies of bins, numbered from 0 ... num_bins-1.

View File

@ -0,0 +1,88 @@
/**
* Copyright (c) 2023 Xiaomi Corporation (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "kaldifeat/csrc/whisper-fbank.h"
#include <cmath>
#include <vector>
#include "kaldifeat/csrc/mel-computations.h"
#include "kaldifeat/csrc/whisper-mel-bank.h"
#include "kaldifeat/csrc/whisper-v3-mel-bank.h"
#ifndef M_2PI
#define M_2PI 6.283185307179586476925286766559005
#endif
namespace kaldifeat {
WhisperFbankComputer::WhisperFbankComputer(const WhisperFbankOptions &opts)
: opts_(opts) {
if (opts.num_mels == 80) {
mel_banks_ = std::make_unique<MelBanks>(kWhisperMelArray, kWhisperMelRows,
kWhisperMelCols, opts.device);
} else if (opts.num_mels == 128) {
mel_banks_ = std::make_unique<MelBanks>(
kWhisperV3MelArray, kWhisperV3MelRows, kWhisperV3MelCols, opts.device);
} else {
KALDIFEAT_ERR << "Unsupported num_mels: " << opts.num_mels
<< ". Support only 80 and 128";
}
opts_.frame_opts.samp_freq = 16000;
opts_.frame_opts.frame_shift_ms = 10;
opts_.frame_opts.frame_length_ms = 25;
opts_.frame_opts.dither = 0;
opts_.frame_opts.preemph_coeff = 0;
opts_.frame_opts.remove_dc_offset = false;
opts_.frame_opts.window_type = "hann";
opts_.frame_opts.round_to_power_of_two = false;
opts_.frame_opts.snip_edges = false;
}
torch::Tensor WhisperFbankComputer::Compute(
torch::Tensor /*signal_raw_log_energy*/, float /*vtln_warp*/,
const torch::Tensor &signal_frame) {
KALDIFEAT_ASSERT(signal_frame.dim() == 2);
KALDIFEAT_ASSERT(signal_frame.size(1) == opts_.frame_opts.PaddedWindowSize());
// note spectrum is in magnitude, not power, because of `abs()`
#if defined(KALDIFEAT_HAS_FFT_NAMESPACE)
// signal_frame shape: [x, 512]
// power shape [x, 257]
torch::Tensor power = torch::fft::rfft(signal_frame).abs().pow(2);
#else
// signal_frame shape [x, 512]
// real_imag shape [x, 257, 2],
// where [..., 0] is the real part
// [..., 1] is the imaginary part
torch::Tensor real_imag = torch::rfft(signal_frame, 1);
torch::Tensor real = real_imag.index({"...", 0});
torch::Tensor imag = real_imag.index({"...", 1});
torch::Tensor power = (real.square() + imag.square());
#endif
torch::Tensor mel_energies = mel_banks_->Compute(power);
torch::Tensor log_spec = torch::clamp_min(mel_energies, 1e-10).log10();
log_spec = torch::maximum(log_spec, log_spec.max() - 8.0);
torch::Tensor mel = (log_spec + 4.0) / 4.0;
return mel;
}
} // namespace kaldifeat

View File

@ -0,0 +1,78 @@
/**
* Copyright (c) 2023 Xiaomi Corporation (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef KALDIFEAT_CSRC_WHISPER_FBANK_H_
#define KALDIFEAT_CSRC_WHISPER_FBANK_H_
#include <memory>
#include <string>
#include <vector>
#include "kaldifeat/csrc/feature-common.h"
#include "kaldifeat/csrc/feature-window.h"
#include "kaldifeat/csrc/mel-computations.h"
namespace kaldifeat {
struct WhisperFbankOptions {
FrameExtractionOptions frame_opts;
// for large v3, please use 128
int32_t num_mels = 80;
torch::Device device{"cpu"};
std::string ToString() const {
std::ostringstream os;
os << "WhisperFbankOptions(";
os << "frame_opts=" << frame_opts.ToString() << ", ";
os << "num_mels=" << num_mels << ", ";
os << "device=\"" << device << "\")";
return os.str();
}
};
class WhisperFbankComputer {
public:
// note: Only frame_opts.device is used. All other fields from frame_opts
// are ignored
explicit WhisperFbankComputer(const WhisperFbankOptions &opts = {});
int32_t Dim() const { return opts_.num_mels; }
const FrameExtractionOptions &GetFrameOptions() const {
return opts_.frame_opts;
}
const WhisperFbankOptions &GetOptions() const { return opts_; }
torch::Tensor Compute(torch::Tensor /*signal_raw_log_energy*/,
float /*vtln_warp*/, const torch::Tensor &signal_frame);
// if true, compute log_energy_pre_window but after dithering and dc removal
bool NeedRawLogEnergy() const { return false; }
using Options = WhisperFbankOptions;
private:
WhisperFbankOptions opts_;
std::unique_ptr<MelBanks> mel_banks_;
};
using WhisperFbank = OfflineFeatureTpl<WhisperFbankComputer>;
} // namespace kaldifeat
#endif // KALDIFEAT_CSRC_WHISPER_FBANK_H_

File diff suppressed because it is too large Load Diff

File diff suppressed because it is too large Load Diff

View File

@ -9,6 +9,7 @@ pybind11_add_module(_kaldifeat
mel-computations.cc
online-feature.cc
utils.cc
whisper-fbank.cc
)
if(APPLE)
@ -28,10 +29,10 @@ endif()
target_link_libraries(_kaldifeat PRIVATE kaldifeat_core)
if(UNIX AND NOT APPLE)
target_link_libraries(_kaldifeat PUBLIC ${TORCH_DIR}/lib/libtorch_python.so)
target_link_libraries(_kaldifeat PUBLIC ${PYTHON_LIBRARY})
# target_link_libraries(_kaldifeat PUBLIC ${PYTHON_LIBRARY})
elseif(WIN32)
target_link_libraries(_kaldifeat PUBLIC ${TORCH_DIR}/lib/torch_python.lib)
target_link_libraries(_kaldifeat PUBLIC ${PYTHON_LIBRARIES})
# target_link_libraries(_kaldifeat PUBLIC ${PYTHON_LIBRARIES})
endif()
install(TARGETS _kaldifeat

View File

@ -12,6 +12,7 @@
#include "kaldifeat/python/csrc/feature-window.h"
#include "kaldifeat/python/csrc/mel-computations.h"
#include "kaldifeat/python/csrc/online-feature.h"
#include "kaldifeat/python/csrc/whisper-fbank.h"
#include "torch/torch.h"
namespace kaldifeat {
@ -22,6 +23,7 @@ PYBIND11_MODULE(_kaldifeat, m) {
PybindFeatureWindow(m);
PybindMelComputations(m);
PybindFeatureFbank(m);
PybindWhisperFbank(&m);
PybindFeatureMfcc(m);
PybindFeaturePlp(m);
PybindFeatureSpectrogram(m);

View File

@ -123,6 +123,35 @@ py::dict AsDict(const FbankOptions &opts) {
return dict;
}
WhisperFbankOptions WhisperFbankOptionsFromDict(py::dict dict) {
WhisperFbankOptions opts;
if (dict.contains("frame_opts")) {
opts.frame_opts = FrameExtractionOptionsFromDict(dict["frame_opts"]);
}
FROM_DICT(int_, num_mels);
if (dict.contains("device")) {
opts.device = torch::Device(std::string(py::str(dict["device"])));
}
return opts;
}
py::dict AsDict(const WhisperFbankOptions &opts) {
py::dict dict;
dict["frame_opts"] = AsDict(opts.frame_opts);
AS_DICT(num_mels);
auto torch_device = py::module_::import("torch").attr("device");
dict["device"] = torch_device(opts.device.str());
return dict;
}
MfccOptions MfccOptionsFromDict(py::dict dict) {
MfccOptions opts;

View File

@ -11,6 +11,7 @@
#include "kaldifeat/csrc/feature-spectrogram.h"
#include "kaldifeat/csrc/feature-window.h"
#include "kaldifeat/csrc/mel-computations.h"
#include "kaldifeat/csrc/whisper-fbank.h"
#include "kaldifeat/python/csrc/kaldifeat.h"
/*
@ -36,6 +37,9 @@ py::dict AsDict(const MelBanksOptions &opts);
FbankOptions FbankOptionsFromDict(py::dict dict);
py::dict AsDict(const FbankOptions &opts);
WhisperFbankOptions WhisperFbankOptionsFromDict(py::dict dict);
py::dict AsDict(const WhisperFbankOptions &opts);
MfccOptions MfccOptionsFromDict(py::dict dict);
py::dict AsDict(const MfccOptions &opts);

View File

@ -0,0 +1,84 @@
// kaldifeat/python/csrc/whisper-fbank.cc
//
// Copyright (c) 2023 Xiaomi Corporation (authors: Fangjun Kuang)
#include "kaldifeat/python/csrc/whisper-fbank.h"
#include <memory>
#include <string>
#include "kaldifeat/csrc/whisper-fbank.h"
#include "kaldifeat/python/csrc/utils.h"
namespace kaldifeat {
static void PybindWhisperFbankOptions(py::module *m) {
using PyClass = WhisperFbankOptions;
py::class_<PyClass>(*m, "WhisperFbankOptions")
.def(py::init<>())
.def(py::init([](const FrameExtractionOptions &frame_opts =
FrameExtractionOptions(),
int32_t num_mels = 80,
py::object device = py::str(
"cpu")) -> std::unique_ptr<WhisperFbankOptions> {
auto opts = std::make_unique<WhisperFbankOptions>();
opts->frame_opts = frame_opts;
opts->num_mels = num_mels;
std::string s = static_cast<py::str>(device);
opts->device = torch::Device(s);
return opts;
}),
py::arg("frame_opts") = FrameExtractionOptions(),
py::arg("num_mels") = 80, py::arg("device") = py::str("cpu"))
.def_readwrite("frame_opts", &PyClass::frame_opts)
.def_readwrite("num_mels", &PyClass::num_mels)
.def_property(
"device",
[](const PyClass &self) -> py::object {
py::object ans = py::module_::import("torch").attr("device");
return ans(self.device.str());
},
[](PyClass &self, py::object obj) -> void {
std::string s = static_cast<py::str>(obj);
self.device = torch::Device(s);
})
.def("__str__",
[](const PyClass &self) -> std::string { return self.ToString(); })
.def("as_dict",
[](const PyClass &self) -> py::dict { return AsDict(self); })
.def_static("from_dict",
[](py::dict dict) -> PyClass {
return WhisperFbankOptionsFromDict(dict);
})
.def(py::pickle(
[](const PyClass &self) -> py::dict { return AsDict(self); },
[](py::dict dict) -> PyClass {
return WhisperFbankOptionsFromDict(dict);
}));
}
static void PybindWhisperFbankImpl(py::module *m) {
using PyClass = WhisperFbank;
py::class_<PyClass>(*m, "WhisperFbank")
.def(py::init<const WhisperFbankOptions &>(), py::arg("opts"))
.def("dim", &PyClass::Dim)
.def_property_readonly("options", &PyClass::GetOptions)
.def("compute_features", &PyClass::ComputeFeatures, py::arg("wave"),
py::arg("vtln_warp"), py::call_guard<py::gil_scoped_release>())
.def(py::pickle(
[](const PyClass &self) -> py::dict {
return AsDict(self.GetOptions());
},
[](py::dict dict) -> std::unique_ptr<PyClass> {
return std::make_unique<PyClass>(WhisperFbankOptionsFromDict(dict));
}));
}
void PybindWhisperFbank(py::module *m) {
PybindWhisperFbankOptions(m);
PybindWhisperFbankImpl(m);
}
} // namespace kaldifeat

View File

@ -0,0 +1,16 @@
// kaldifeat/python/csrc/whisper-fbank.h
//
// Copyright (c) 2023 Xiaomi Corporation (authors: Fangjun Kuang)
#ifndef KALDIFEAT_PYTHON_CSRC_WHISPER_FBANK_H_
#define KALDIFEAT_PYTHON_CSRC_WHISPER_FBANK_H_
#include "kaldifeat/python/csrc/kaldifeat.h"
namespace kaldifeat {
void PybindWhisperFbank(py::module *m);
} // namespace kaldifeat
#endif // KALDIFEAT_PYTHON_CSRC_WHISPER_FBANK_H_

View File

@ -17,6 +17,7 @@ from _kaldifeat import (
MfccOptions,
PlpOptions,
SpectrogramOptions,
WhisperFbankOptions,
num_frames,
)
@ -26,6 +27,7 @@ from .offline_feature import OfflineFeature
from .online_feature import OnlineFeature
from .plp import OnlinePlp, Plp
from .spectrogram import Spectrogram
from .whisper_fbank import WhisperFbank
cmake_prefix_path = _Path(__file__).parent / "share" / "cmake"
del _Path

View File

@ -0,0 +1,12 @@
# Copyright (c) 2021 Xiaomi Corporation (authors: Fangjun Kuang)
import _kaldifeat
from .offline_feature import OfflineFeature
class WhisperFbank(OfflineFeature):
def __init__(self, opts: _kaldifeat.WhisperFbankOptions):
super().__init__(opts)
self.computer = _kaldifeat.WhisperFbank(opts)

View File

@ -0,0 +1,48 @@
#!/usr/bin/env python3
# Copyright 2023 Xiaomi Corporation (authors: Fangjun Kuang)
import librosa
import torch
import kaldifeat
def get_ground_truth(x):
N_FFT = 400
HOP_LENGTH = 160
m = librosa.filters.mel(sr=16000, n_fft=400, n_mels=80)
m = torch.from_numpy(m)
# print(m.shape) # [80, 201]
window = torch.hann_window(N_FFT)
stft = torch.stft(x, N_FFT, HOP_LENGTH, window=window, return_complex=True)
# print(stft.shape) # [201, 301]
magnitudes = stft[..., :-1].abs() ** 2
# print(magnitudes.shape) # [201, 300]
mel_spec = m @ magnitudes
# print(mel_spec.shape) # [80, 300]
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec.t()
def test_whisper_fbank():
x = torch.rand(16000 * 3)
gt = get_ground_truth(x)
print(gt.shape) # [300, 80]
opts = kaldifeat.WhisperFbankOptions(device="cpu")
whisper_fbank = kaldifeat.WhisperFbank(opts)
y = whisper_fbank(x) # [298, 80]
print(y.shape) # [298, 80]
# print(gt[:5, :5])
# print(y[:5, :5])
if __name__ == "__main__":
torch.manual_seed(20231108)
test_whisper_fbank()

View File

@ -0,0 +1,49 @@
#!/usr/bin/env python3
# Copyright 2023 Xiaomi Corporation (authors: Fangjun Kuang)
import librosa
import torch
import kaldifeat
def get_ground_truth(x):
N_FFT = 400
HOP_LENGTH = 160
m = librosa.filters.mel(sr=16000, n_fft=400, n_mels=128)
m = torch.from_numpy(m)
# print(m.shape) # [128, 201]
window = torch.hann_window(N_FFT)
stft = torch.stft(x, N_FFT, HOP_LENGTH, window=window, return_complex=True)
# print(stft.shape) # [201, 301]
magnitudes = stft[..., :-1].abs() ** 2
# print(magnitudes.shape) # [201, 300]
mel_spec = m @ magnitudes
# print(mel_spec.shape) # [128, 300]
log_spec = torch.clamp(mel_spec, min=1e-10).log10()
log_spec = torch.maximum(log_spec, log_spec.max() - 8.0)
log_spec = (log_spec + 4.0) / 4.0
return log_spec.t()
def test_whisper_v3_fbank():
x = torch.rand(16000 * 3)
gt = get_ground_truth(x)
print(gt.shape) # [300, 128]
opts = kaldifeat.WhisperFbankOptions(num_mels=128, device="cpu")
print(opts)
whisper_fbank = kaldifeat.WhisperFbank(opts)
y = whisper_fbank(x) # [298, 128]
print(y.shape) # [298, 128]
print(gt[:5, :5])
print(y[:5, :5])
if __name__ == "__main__":
torch.manual_seed(20231109)
test_whisper_v3_fbank()

View File

@ -1,6 +1,6 @@
package:
name: kaldifeat
version: "1.25.1"
version: "1.25.5"
source:
path: "{{ environ.get('KALDIFEAT_ROOT_DIR') }}"

View File

@ -1,6 +1,6 @@
package:
name: kaldifeat
version: "1.25.1"
version: "1.25.5"
source:
path: "{{ environ.get('KALDIFEAT_ROOT_DIR') }}"

View File

@ -0,0 +1,78 @@
#!/usr/bin/env bash
#
# Build a kaldifeat CPU wheel for Linux aarch64 inside a manylinux container.
#
# Required environment variables:
#   PYTHON_VERSION  e.g. 3.8  (valid: 3.8, 3.9, 3.10, 3.11)
#   TORCH_VERSION   e.g. 1.10.0
# Optional:
#   IS_2_28         set to "1" to tag the wheel manylinux_2_28_aarch64,
#                   otherwise manylinux_2_17_aarch64 is used
#   CUDA_VERSION    only referenced by the auditwheel --exclude patterns
#   PYTHON_INSTALL_DIR  prefix of the Python installation to use

set -ex

# Quote the variables so an empty or space-containing value cannot make
# `test` see the wrong number of arguments.
if [ -z "$PYTHON_VERSION" ]; then
  echo "Please set the environment variable PYTHON_VERSION"
  echo "Example: export PYTHON_VERSION=3.8"
  # Valid values: 3.8, 3.9, 3.10, 3.11
  exit 1
fi

if [ -z "$TORCH_VERSION" ]; then
  echo "Please set the environment variable TORCH_VERSION"
  echo "Example: export TORCH_VERSION=1.10.0"
  exit 1
fi

export PATH=$PYTHON_INSTALL_DIR/bin:$PATH
export LD_LIBRARY_PATH=$PYTHON_INSTALL_DIR/lib:$LD_LIBRARY_PATH

ls -lh $PYTHON_INSTALL_DIR/lib/

python3 --version
which python3

# Build-time tooling.
python3 -m pip install scikit-build
python3 -m pip install -U pip cmake
python3 -m pip install wheel twine typing_extensions
python3 -m pip install bs4 requests tqdm auditwheel

# Try PyPI first, then fall back to the PyTorch wheel indexes.
echo "Installing torch"
python3 -m pip install -qq torch==$TORCH_VERSION || \
python3 -m pip install -qq torch==$TORCH_VERSION -f https://download.pytorch.org/whl/torch_stable.html || \
python3 -m pip install -qq torch==$TORCH_VERSION -f https://download.pytorch.org/whl/torch/

rm -rf ~/.cache/pip
yum clean all

cd /var/www

export CMAKE_CUDA_COMPILER_LAUNCHER=
export KALDIFEAT_CMAKE_ARGS=" -DPYTHON_EXECUTABLE=$PYTHON_INSTALL_DIR/bin/python3 "
export KALDIFEAT_MAKE_ARGS=" -j "

# This is a CPU-only build: remove any CUDA toolkit baked into the image
# so CMake cannot accidentally pick it up.
nvcc --version || true
rm -rf /usr/local/cuda*
nvcc --version || true

# Pick the platform tag the wheel will be repaired against.
if [[ x"$IS_2_28" == x"1" ]]; then
  plat=manylinux_2_28_aarch64
else
  plat=manylinux_2_17_aarch64
fi

python3 setup.py bdist_wheel

# Repair the wheel but leave out the torch/CUDA shared libraries: they
# are provided at runtime by the installed torch package, and bundling
# them would bloat the wheel and shadow the user's torch.
auditwheel --verbose repair \
  --exclude libc10.so \
  --exclude libc10_cuda.so \
  --exclude libcuda.so.1 \
  --exclude libcudart.so.${CUDA_VERSION} \
  --exclude libnvToolsExt.so.1 \
  --exclude libnvrtc.so.${CUDA_VERSION} \
  --exclude libtorch.so \
  --exclude libtorch_cpu.so \
  --exclude libtorch_cuda.so \
  --exclude libtorch_python.so \
  \
  --exclude libcudnn.so.8 \
  --exclude libcublas.so.11 \
  --exclude libcublasLt.so.11 \
  --exclude libcudart.so.11.0 \
  --exclude libnvrtc.so.11.2 \
  --exclude libtorch_cuda_cu.so \
  --exclude libtorch_cuda_cpp.so \
  --plat $plat \
  dist/*.whl

ls -lh /var/www

View File

@ -15,49 +15,21 @@ if [ -z $TORCH_VERSION ]; then
exit 1
fi
yum -y install openssl-devel bzip2-devel libffi-devel xz-devel wget redhat-lsb-core
echo "Installing ${PYTHON_VERSION}.3"
curl -O https://www.python.org/ftp/python/${PYTHON_VERSION}.3/Python-${PYTHON_VERSION}.3.tgz
tar xf Python-${PYTHON_VERSION}.3.tgz
pushd Python-${PYTHON_VERSION}.3
PYTHON_INSTALL_DIR=$PWD/py-${PYTHON_VERSION}
if [[ $PYTHON_VERSION =~ 3.1. ]]; then
yum install -y openssl11-devel
sed -i 's/PKG_CONFIG openssl /PKG_CONFIG openssl11 /g' configure
fi
./configure --enable-shared --prefix=$PYTHON_INSTALL_DIR >/dev/null 2>&1
make install >/dev/null 2>&1
popd
export PATH=$PYTHON_INSTALL_DIR/bin:$PATH
export LD_LIBRARY_PATH=$PYTHON_INSTALL_DIR/lib:$LD_LIBRARY_PATH
ls -lh $PYTHON_INSTALL_DIR/lib/
nvcc --version || true
rm -rf /usr/local/cuda*
nvcc --version || true
python3 --version
which python3
if [[ $PYTHON_VERSION != 3.6 ]]; then
curl -O https://bootstrap.pypa.io/get-pip.py
python3 get-pip.py
fi
python3 -m pip install scikit-build
python3 -m pip install -U pip cmake
python3 -m pip install wheel twine typing_extensions
python3 -m pip install bs4 requests tqdm auditwheel
echo "Installing torch"
python3 -m pip install -qq torch==$TORCH_VERSION+cpu -f https://download.pytorch.org/whl/torch_stable.html
python3 -m pip install -qq torch==$TORCH_VERSION+cpu -f https://download.pytorch.org/whl/torch_stable.html || \
python3 -m pip install -qq torch==$TORCH_VERSION+cpu -f https://download.pytorch.org/whl/torch/
rm -rf ~/.cache/pip
yum clean all
@ -68,8 +40,16 @@ export CMAKE_CUDA_COMPILER_LAUNCHER=
export KALDIFEAT_CMAKE_ARGS=" -DPYTHON_EXECUTABLE=$PYTHON_INSTALL_DIR/bin/python3 "
export KALDIFEAT_MAKE_ARGS=" -j "
nvcc --version || true
rm -rf /usr/local/cuda*
nvcc --version || true
python3 setup.py bdist_wheel
if [[ x"$IS_2_28" == x"1" ]]; then
plat=manylinux_2_28_x86_64
else
plat=manylinux_2_17_x86_64
fi
auditwheel --verbose repair \
--exclude libc10.so \
@ -90,8 +70,7 @@ auditwheel --verbose repair \
--exclude libnvrtc.so.11.2 \
--exclude libtorch_cuda_cu.so \
--exclude libtorch_cuda_cpp.so \
--plat manylinux_2_17_x86_64 \
-w /var/www/wheelhouse \
--plat $plat \
dist/*.whl
ls -lh /var/www

View File

@ -18,28 +18,63 @@ fi
if [ -z $CUDA_VERSION ]; then
echo "Please set the environment variable CUDA_VERSION"
echo "Example: export CUDA_VERSION=10.2"
# valid values: 10.2, 11.1, 11.3, 11.6, 11.7, 11.8
# valid values: 10.2, 11.1, 11.3, 11.6, 11.7, 11.8, 12.1
exit 1
fi
if [[ $TORCH_VERSION =~ 2.2.* && $CUDA_VERSION =~ 12.* ]]; then
# see https://github.com/pytorch/pytorch/issues/113948
export TORCH_CUDA_ARCH_LIST="8.0 8.6 8.9 9.0"
fi
yum -y install openssl-devel bzip2-devel libffi-devel xz-devel wget redhat-lsb-core
# python3 -m pip install scikit-build
python3 -m pip install -U pip cmake
python3 -m pip install wheel twine typing_extensions
python3 -m pip install bs4 requests tqdm auditwheel
echo "Installing torch"
./install_torch.sh
python3 -c "import torch; print(torch.__file__)"
echo "Installing ${PYTHON_VERSION}.3"
curl -O https://www.python.org/ftp/python/${PYTHON_VERSION}.3/Python-${PYTHON_VERSION}.3.tgz
tar xf Python-${PYTHON_VERSION}.3.tgz
pushd Python-${PYTHON_VERSION}.3
# -- Autodetected CUDA architecture(s): 5.0;8.0;8.6;8.9;9.0;9.0a
# CMake Error at /Python-3.8.2/py-3.8/lib/python3.8/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake:227 (message):
# Unknown CUDA Architecture Name 9.0a in CUDA_SELECT_NVCC_ARCH_FLAGS
# Call Stack (most recent call first):
# /Python-3.8.2/py-3.8/lib/python3.8/site-packages/torch/share/cmake/Caffe2/public/utils.cmake:401 (cuda_select_nvcc_arch_flags)
# /Python-3.8.2/py-3.8/lib/python3.8/site-packages/torch/share/cmake/Caffe2/public/cuda.cmake:342 (torch_cuda_get_nvcc_gencode_flag)
# /Python-3.8.2/py-3.8/lib/python3.8/site-packages/torch/share/cmake/Caffe2/Caffe2Config.cmake:87 (include)
# /Python-3.8.2/py-3.8/lib/python3.8/site-packages/torch/share/cmake/Torch/TorchConfig.cmake:68 (find_package)
# cmake/torch.cmake:14 (find_package)
# CMakeLists.txt:62 (include)
sed -i.bak /9.0a/d /Python-*/py-3.*/lib/python3.*/site-packages/torch/share/cmake/Caffe2/Modules_CUDA_fix/upstream/FindCUDA/select_compute_arch.cmake || true
if [[ x"$IS_2_28" != x"1" ]]; then
yum -y install openssl-devel
fi
yum -y install zlib-devel bzip2-devel libffi-devel xz-devel wget redhat-lsb-core
INSTALLED_PYTHON_VERSION=${PYTHON_VERSION}.2
if [[ $PYTHON_VERSION == "3.13" ]]; then
INSTALLED_PYTHON_VERSION=${PYTHON_VERSION}.0
fi
echo "Installing $INSTALLED_PYTHON_VERSION"
curl -O https://www.python.org/ftp/python/$INSTALLED_PYTHON_VERSION/Python-$INSTALLED_PYTHON_VERSION.tgz
tar xf Python-$INSTALLED_PYTHON_VERSION.tgz
pushd Python-$INSTALLED_PYTHON_VERSION
PYTHON_INSTALL_DIR=$PWD/py-${PYTHON_VERSION}
if [[ $PYTHON_VERSION =~ 3.1. ]]; then
if [[ $PYTHON_VERSION =~ 3.1. && x"$IS_2_28" != x"1" ]]; then
yum install -y openssl11-devel
sed -i 's/PKG_CONFIG openssl /PKG_CONFIG openssl11 /g' configure
fi
./configure --enable-shared --prefix=$PYTHON_INSTALL_DIR >/dev/null 2>&1
make install >/dev/null 2>&1
./configure --enable-shared --prefix=$PYTHON_INSTALL_DIR >/dev/null
make install >/dev/null
popd
@ -50,21 +85,8 @@ ls -lh $PYTHON_INSTALL_DIR/lib/
python3 --version
which python3
if [[ $PYTHON_VERSION != 3.6 ]]; then
curl -O https://bootstrap.pypa.io/get-pip.py
python3 get-pip.py
fi
python3 -m pip install scikit-build
python3 -m pip install -U pip cmake
python3 -m pip install wheel twine typing_extensions
python3 -m pip install bs4 requests tqdm auditwheel
echo "Installing torch"
./install_torch.sh
rm -rf ~/.cache/pip
yum clean all
rm -rf ~/.cache/pip >/dev/null 2>&1
yum clean all >/dev/null 2>&1
cd /var/www
@ -72,9 +94,20 @@ export CMAKE_CUDA_COMPILER_LAUNCHER=
export KALDIFEAT_CMAKE_ARGS=" -DPYTHON_EXECUTABLE=$PYTHON_INSTALL_DIR/bin/python3 "
export KALDIFEAT_MAKE_ARGS=" -j2 "
echo "KALDIFEAT_CMAKE_ARGS: $KALDIFEAT_CMAKE_ARGS"
python3 setup.py bdist_wheel
if [[ x"$IS_2_28" == x"1" ]]; then
plat=manylinux_2_28_x86_64
else
plat=manylinux_2_17_x86_64
fi
export PATH=$PYTHON_INSTALL_DIR/bin:$PATH
python3 --version
which python3
auditwheel --verbose repair \
--exclude libc10.so \
--exclude libc10_cuda.so \
@ -87,14 +120,34 @@ auditwheel --verbose repair \
--exclude libtorch_cuda.so \
--exclude libtorch_python.so \
\
--exclude libcudnn.so.8 \
--exclude libcublas.so \
--exclude libcublas.so.11 \
--exclude libcublas.so.12 \
--exclude libcublasLt.so \
--exclude libcublasLt.so.11 \
--exclude libcublasLt.so.12 \
--exclude libcudart.so.11.0 \
--exclude libcudart.so.12 \
--exclude libcudnn.so.8 \
--exclude libcufft.so \
--exclude libcufft.so.11 \
--exclude libcupti.so \
--exclude libcupti.so.12 \
--exclude libcurand.so \
--exclude libcurand.so.10 \
--exclude libcusparse.so \
--exclude libcusparse.so.12 \
--exclude libnccl.so \
--exclude libnccl.so.2 \
--exclude libnvJitLink.so \
--exclude libnvJitLink.so.12 \
--exclude libnvrtc.so \
--exclude libnvrtc.so.11.2 \
--exclude libtorch_cuda_cu.so \
--exclude libnvrtc.so.12 \
--exclude libshm.so \
--exclude libtorch_cuda_cpp.so \
--plat manylinux_2_17_x86_64 \
--exclude libtorch_cuda_cu.so \
--plat $plat \
-w /var/www/wheelhouse \
dist/*.whl

View File

@ -1,10 +1,31 @@
#!/usr/bin/env python3
# Copyright 2022 Xiaomi Corp. (authors: Fangjun Kuang)
"""
See also
https://github.com/pytorch/test-infra/blob/main/.github/workflows/test_build_wheels_linux_with_cuda.yml
https://github.com/pytorch/test-infra/blob/main/.github/workflows/test_build_wheels_linux_without_cuda.yml
https://github.com/pytorch/test-infra/actions/workflows/test_build_wheels_linux_with_cuda.yml
https://github.com/pytorch/test-infra/blob/main/tools/scripts/generate_binary_build_matrix.py
"""
import argparse
import json
def version_ge(a, b):
a_major, a_minor = list(map(int, a.split(".")))[:2]
b_major, b_minor = list(map(int, b.split(".")))[:2]
if a_major > b_major:
return True
if a_major == b_major and a_minor >= b_minor:
return True
return False
def get_args():
parser = argparse.ArgumentParser()
parser.add_argument(
@ -28,6 +49,13 @@ def get_args():
help="True for macOS",
)
parser.add_argument(
"--for-arm64",
action="store_true",
default=False,
help="True for arm64",
)
parser.add_argument(
"--test-only-latest-torch",
action="store_true",
@ -38,7 +66,9 @@ def get_args():
return parser.parse_args()
def generate_build_matrix(enable_cuda, for_windows, for_macos, test_only_latest_torch):
def generate_build_matrix(
enable_cuda, for_windows, for_macos, test_only_latest_torch, for_arm64
):
matrix = {
# 1.5.x is removed because there are compilation errors.
# See
@ -53,77 +83,91 @@ def generate_build_matrix(enable_cuda, for_windows, for_macos, test_only_latest_
# "python-version": ["3.6", "3.7", "3.8"],
# "cuda": ["10.1", "10.2"],
# },
"1.6.0": {
"python-version": ["3.6", "3.7", "3.8"],
"cuda": ["10.1", "10.2"] if not for_windows else ["10.1.243", "10.2.89"],
},
"1.7.0": {
"python-version": ["3.6", "3.7", "3.8"],
"cuda": ["10.1", "10.2", "11.0"]
if not for_windows
else ["10.1.243", "10.2.89", "11.0.3"],
},
"1.7.1": {
"python-version": ["3.6", "3.7", "3.8", "3.9"],
"cuda": ["10.1", "10.2", "11.0"]
if not for_windows
else ["10.1.243", "10.2.89", "11.0.3"],
},
"1.8.0": {
"python-version": ["3.6", "3.7", "3.8", "3.9"],
"cuda": ["10.1", "10.2", "11.1"]
if not for_windows
else ["10.1.243", "10.2.89", "11.1.1"],
},
"1.8.1": {
"python-version": ["3.6", "3.7", "3.8", "3.9"],
"cuda": ["10.1", "10.2", "11.1"]
if not for_windows
else ["10.1.243", "10.2.89", "11.1.1"],
},
"1.9.0": {
"python-version": ["3.6", "3.7", "3.8", "3.9"],
"cuda": ["10.2", "11.1"] if not for_windows else ["10.2.89", "11.1.1"],
},
"1.9.1": {
"python-version": ["3.6", "3.7", "3.8", "3.9"],
"cuda": ["10.2", "11.1"] if not for_windows else ["10.2.89", "11.1.1"],
},
# "1.6.0": {
# "python-version": ["3.6", "3.7", "3.8"],
# "cuda": ["10.1", "10.2"] if not for_windows else ["10.1.243", "10.2.89"],
# },
# "1.7.0": {
# "python-version": ["3.6", "3.7", "3.8"],
# "cuda": (
# ["10.1", "10.2", "11.0"]
# if not for_windows
# else ["10.1.243", "10.2.89", "11.0.3"]
# ),
# },
# "1.7.1": {
# "python-version": ["3.6", "3.7", "3.8", "3.9"],
# "cuda": (
# ["10.1", "10.2", "11.0"]
# if not for_windows
# else ["10.1.243", "10.2.89", "11.0.3"]
# ),
# },
# "1.8.0": {
# "python-version": ["3.6", "3.7", "3.8", "3.9"],
# "cuda": (
# ["10.1", "10.2", "11.1"]
# if not for_windows
# else ["10.1.243", "10.2.89", "11.1.1"]
# ),
# },
# "1.8.1": {
# "python-version": ["3.6", "3.7", "3.8", "3.9"],
# "cuda": (
# ["10.1", "10.2", "11.1"]
# if not for_windows
# else ["10.1.243", "10.2.89", "11.1.1"]
# ),
# },
# "1.9.0": {
# "python-version": ["3.6", "3.7", "3.8", "3.9"],
# "cuda": ["10.2", "11.1"] if not for_windows else ["10.2.89", "11.1.1"],
# },
# "1.9.1": {
# "python-version": ["3.6", "3.7", "3.8", "3.9"],
# "cuda": ["10.2", "11.1"] if not for_windows else ["10.2.89", "11.1.1"],
# },
"1.10.0": {
"python-version": ["3.6", "3.7", "3.8", "3.9"],
"cuda": ["10.2", "11.1", "11.3"]
if not for_windows
else ["10.2.89", "11.1.1", "11.3.1"],
"cuda": (
["10.2", "11.1", "11.3"]
if not for_windows
else ["10.2.89", "11.1.1", "11.3.1"]
),
},
"1.10.1": {
"python-version": ["3.6", "3.7", "3.8", "3.9"],
"cuda": ["10.2", "11.1", "11.3"]
if not for_windows
else ["10.2.89", "11.1.1", "11.3.1"],
"cuda": (
["10.2", "11.1", "11.3"]
if not for_windows
else ["10.2.89", "11.1.1", "11.3.1"]
),
},
"1.10.2": {
"python-version": ["3.6", "3.7", "3.8", "3.9"],
"cuda": ["10.2", "11.1", "11.3"]
if not for_windows
else ["10.2.89", "11.1.1", "11.3.1"],
"cuda": (
["10.2", "11.1", "11.3"]
if not for_windows
else ["10.2.89", "11.1.1", "11.3.1"]
),
},
"1.11.0": {
"python-version": ["3.7", "3.8", "3.9", "3.10"],
"cuda": ["10.2", "11.3", "11.5"]
if not for_windows
else ["11.3.1", "11.5.2"],
"cuda": (
["10.2", "11.3", "11.5"] if not for_windows else ["11.3.1", "11.5.2"]
),
},
"1.12.0": {
"python-version": ["3.7", "3.8", "3.9", "3.10"],
"cuda": ["10.2", "11.3", "11.6"]
if not for_windows
else ["11.3.1", "11.6.2"],
"cuda": (
["10.2", "11.3", "11.6"] if not for_windows else ["11.3.1", "11.6.2"]
),
},
"1.12.1": {
"python-version": ["3.7", "3.8", "3.9", "3.10"],
"cuda": ["10.2", "11.3", "11.6"]
if not for_windows
else ["11.3.1", "11.6.2"],
"cuda": (
["10.2", "11.3", "11.6"] if not for_windows else ["11.3.1", "11.6.2"]
),
},
"1.13.0": {
"python-version": ["3.7", "3.8", "3.9", "3.10", "3.11"],
@ -131,28 +175,171 @@ def generate_build_matrix(enable_cuda, for_windows, for_macos, test_only_latest_
},
"1.13.1": {
"python-version": ["3.7", "3.8", "3.9", "3.10", "3.11"],
"cuda": ["11.6", "11.7"] # default 11.7
if not for_windows
else ["11.6.2", "11.7.1"],
"cuda": (
["11.6", "11.7"] # default 11.7
if not for_windows
else ["11.6.2", "11.7.1"]
),
},
"2.0.0": {
"python-version": ["3.8", "3.9", "3.10", "3.11"],
"cuda": ["11.7", "11.8"] # default 11.7
if not for_windows
else ["11.7.1", "11.8.0"],
"cuda": (
["11.7", "11.8"] # default 11.7
if not for_windows
else ["11.7.1", "11.8.0"]
),
},
"2.0.1": {
"python-version": ["3.8", "3.9", "3.10", "3.11"],
"cuda": ["11.7", "11.8"] # default 11.7
if not for_windows
else ["11.7.1", "11.8.0"],
"cuda": (
["11.7", "11.8"] # default 11.7
if not for_windows
else ["11.7.1", "11.8.0"]
),
},
"2.1.0": {
"python-version": ["3.8", "3.9", "3.10", "3.11"],
"cuda": (
["11.8", "12.1"] # default 12.1
if not for_windows
else ["11.8.0", "12.1.0"]
),
},
"2.1.1": {
"python-version": ["3.8", "3.9", "3.10", "3.11"],
"cuda": (
["11.8", "12.1"] # default 12.1
if not for_windows
else ["11.8.0", "12.1.0"]
),
},
"2.1.2": {
"python-version": ["3.8", "3.9", "3.10", "3.11"],
"cuda": (
["11.8", "12.1"] # default 12.1
if not for_windows
else ["11.8.0", "12.1.0"]
),
},
"2.2.0": {
"python-version": ["3.8", "3.9", "3.10", "3.11", "3.12"],
"cuda": (
["11.8", "12.1"] # default 12.1
if not for_windows
else ["11.8.0", "12.1.0"]
),
},
"2.2.1": {
"python-version": ["3.8", "3.9", "3.10", "3.11", "3.12"],
"cuda": (
["11.8", "12.1"] # default 12.1
if not for_windows
else ["11.8.0", "12.1.0"]
),
},
"2.2.2": {
"python-version": ["3.8", "3.9", "3.10", "3.11", "3.12"],
"cuda": (
["11.8", "12.1"] # default 12.1
if not for_windows
else ["11.8.0", "12.1.0"]
),
},
"2.3.0": {
"python-version": ["3.8", "3.9", "3.10", "3.11", "3.12"],
"cuda": (
["11.8", "12.1"] # default 12.1
if not for_windows
else ["11.8.0", "12.1.0"]
),
},
"2.3.1": {
"python-version": ["3.8", "3.9", "3.10", "3.11", "3.12"],
"cuda": (
["11.8", "12.1"] # default 12.1
if not for_windows
else ["11.8.0", "12.1.0"]
),
},
"2.4.0": {
"python-version": ["3.8", "3.9", "3.10", "3.11", "3.12"],
"cuda": (
["11.8", "12.1", "12.4"] # default 12.1
if not for_windows
else ["11.8.0", "12.1.0", "12.4.0"]
),
},
"2.4.1": {
"python-version": ["3.8", "3.9", "3.10", "3.11", "3.12"],
"cuda": (
["11.8", "12.1", "12.4"] # default 12.1
if not for_windows
else ["11.8.0", "12.1.0", "12.4.0"]
),
},
"2.5.0": {
# Only Linux supports python 3.13
"python-version": ["3.9", "3.10", "3.11", "3.12", "3.13"],
"cuda": (
["11.8", "12.1", "12.4"] # default 12.4
if not for_windows
else ["11.8.0", "12.1.0", "12.4.0"]
),
},
"2.5.1": {
# Only Linux supports python 3.13
"python-version": ["3.9", "3.10", "3.11", "3.12", "3.13"],
"cuda": (
["11.8", "12.1", "12.4"] # default 12.4
if not for_windows
else ["11.8.0", "12.1.0", "12.4.0"]
),
},
"2.6.0": {
"python-version": ["3.9", "3.10", "3.11", "3.12", "3.13"],
"cuda": (
["11.8", "12.4", "12.6"] # default 12.4
if not for_windows
else ["11.8.0", "12.4.0", "12.6.0"]
),
},
"2.7.0": {
"python-version": ["3.9", "3.10", "3.11", "3.12", "3.13"],
"cuda": (
["11.8", "12.6", "12.8"]
if not for_windows
else ["11.8.0", "12.6.2", "12.8.1"]
),
},
"2.7.1": {
"python-version": ["3.9", "3.10", "3.11", "3.12", "3.13"],
"cuda": (
["11.8", "12.6", "12.8"] # default 12.6
if not for_windows
else ["11.8.0", "12.6.2", "12.8.1"]
),
},
"2.8.0": {
"python-version": ["3.9", "3.10", "3.11", "3.12", "3.13"],
"cuda": (
["12.6", "12.8", "12.9"] # default 12.8
if not for_windows
else ["12.6.2", "12.8.1", "12.9.1"]
),
},
# https://github.com/Jimver/cuda-toolkit/blob/master/src/links/windows-links.ts
}
if test_only_latest_torch:
latest = "2.0.1"
latest = "2.8.0"
matrix = {latest: matrix[latest]}
if for_windows or for_macos:
if "2.5.1" in matrix:
matrix["2.5.1"]["python-version"].remove("3.13")
if "2.5.0" in matrix:
matrix["2.5.0"]["python-version"].remove("3.13")
if "1.13.0" in matrix:
matrix["1.13.0"]["python-version"].remove("3.11")
@ -160,9 +347,18 @@ def generate_build_matrix(enable_cuda, for_windows, for_macos, test_only_latest_
matrix["1.13.1"]["python-version"].remove("3.11")
excluded_python_versions = ["3.6"]
enabled_torch_versions = ["1.10.0"]
enabled_torch_versions += ["1.13.0", "1.13.1"]
min_torch_version = "2.0.0"
ans = []
for torch, python_cuda in matrix.items():
if enabled_torch_versions and torch not in enabled_torch_versions:
if not version_ge(torch, min_torch_version):
continue
python_versions = python_cuda["python-version"]
cuda_versions = python_cuda["cuda"]
if enable_cuda:
@ -173,12 +369,30 @@ def generate_build_matrix(enable_cuda, for_windows, for_macos, test_only_latest_
if c in ["10.1", "11.0"]:
# no docker image for cuda 10.1 and 11.0
continue
if version_ge(torch, "2.7.0") or (
version_ge(torch, "2.6.0") and c == "12.6"
):
# case 1: torch >= 2.7
# case 2: torch == 2.6.0 && cuda == 12.6
ans.append(
{
"torch": torch,
"python-version": p,
"cuda": c,
"image": f"pytorch/manylinux2_28-builder:cuda{c}",
"is_2_28": "1",
}
)
continue
ans.append(
{
"torch": torch,
"python-version": p,
"cuda": c,
"image": f"pytorch/manylinux-builder:cuda{c}",
"is_2_28": "0",
}
)
else:
@ -186,15 +400,53 @@ def generate_build_matrix(enable_cuda, for_windows, for_macos, test_only_latest_
if p in excluded_python_versions:
continue
if for_windows or for_macos:
p = "cp" + "".join(p.split("."))
if for_windows:
ans.append({"torch": torch, "python-version": p})
elif for_macos:
ans.append({"torch": torch, "python-version": p})
elif version_ge(torch, "2.6.0"):
ans.append(
{
"torch": torch,
"python-version": p,
"image": "pytorch/manylinux2_28-builder:cpu"
if not for_arm64
else "pytorch/manylinux2_28_aarch64-builder:cpu-aarch64",
"is_2_28": "1",
}
)
elif version_ge(torch, "2.4.0"):
ans.append(
{
"torch": torch,
"python-version": p,
# "image": "pytorch/manylinux-builder:cpu-2.4",
"image": "pytorch/manylinux-builder:cpu-27677ead7c8293c299a885ae2c474bf445e653a5"
if not for_arm64
else "pytorch/manylinuxaarch64-builder:cpu-aarch64-195148266541a9789074265141cb7dc19dc14c54",
"is_2_28": "0",
}
)
elif version_ge(torch, "2.2.0"):
ans.append(
{
"torch": torch,
"python-version": p,
"image": "pytorch/manylinux-builder:cpu-2.2"
if not for_arm64
else "pytorch/manylinuxaarch64-builder:cpu-aarch64-195148266541a9789074265141cb7dc19dc14c54",
"is_2_28": "0",
}
)
else:
ans.append(
{
"torch": torch,
"python-version": p,
"image": f"pytorch/manylinux-builder:cuda10.2",
"image": "pytorch/manylinux-builder:cuda10.2"
if not for_arm64
else "pytorch/manylinuxaarch64-builder:cpu-aarch64-195148266541a9789074265141cb7dc19dc14c54",
"is_2_28": "0",
}
)
@ -207,6 +459,7 @@ def main():
enable_cuda=args.enable_cuda,
for_windows=args.for_windows,
for_macos=args.for_macos,
for_arm64=args.for_arm64,
test_only_latest_torch=args.test_only_latest_torch,
)

View File

@ -49,6 +49,15 @@ case "$cuda" in
11.7)
url=https://developer.download.nvidia.com/compute/cuda/11.7.1/local_installers/cuda_11.7.1_515.65.01_linux.run
;;
11.8)
url=https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda_11.8.0_520.61.05_linux.run
;;
12.1)
url=https://developer.download.nvidia.com/compute/cuda/12.1.0/local_installers/cuda_12.1.0_530.30.02_linux.run
;;
12.4)
url=https://developer.download.nvidia.com/compute/cuda/12.4.0/local_installers/cuda_12.4.0_550.54.14_linux.run
;;
*)
echo "Unknown cuda version: $cuda"
exit 1
@ -63,10 +72,21 @@ retry curl -LSs -O $url
filename=$(basename $url)
echo "filename: $filename"
chmod +x ./$filename
sudo ./$filename --toolkit --silent
ls -lh
ls -lh /usr/local
sudo ./$filename \
--silent \
--toolkit \
--no-opengl-libs \
--no-drm \
--no-man-page
rm -fv ./$filename
export CUDA_HOME=/usr/local/cuda
export PATH=$CUDA_HOME/bin:$PATH
export LD_LIBRARY_PATH=$CUDA_HOME/lib:$LD_LIBRARY_PATH
export LD_LIBRARY_PATH=$CUDA_HOME/lib64:$LD_LIBRARY_PATH
ls -lh $CUDA_HOME

View File

@ -42,6 +42,15 @@ case $cuda in
11.7)
filename=cudnn-11.3-linux-x64-v8.2.0.53.tgz
;;
11.8)
filename=cudnn-11.3-linux-x64-v8.2.0.53.tgz
;;
12.1)
filename=cudnn-linux-x86_64-8.9.5.29_cuda12-archive.tar.xz
;;
12.4)
filename=cudnn-linux-x86_64-8.9.7.29_cuda12-archive.tar.xz
;;
*)
echo "Unsupported cuda version: $cuda"
exit 1

View File

@ -14,8 +14,13 @@
# See the License for the specific language governing permissions and
# limitations under the License.
torch=$TORCH_VERSION
cuda=$CUDA_VERSION
set -x
if [ x"$TORCH_VERSION" != x"" ] && [ x"$CUDA_VERSION" != x"" ]; then
torch=$TORCH_VERSION
cuda=$CUDA_VERSION
fi
case ${torch} in
1.5.*)
case ${cuda} in
@ -169,6 +174,130 @@ case ${torch} in
;;
esac
;;
2.1.*)
case ${cuda} in
11.8)
package="torch==${torch}+cu118"
url=https://download.pytorch.org/whl/torch_stable.html
;;
12.1)
package="torch==${torch}"
# Leave it empty to use PyPI.
url=
;;
esac
;;
2.2.*)
case ${cuda} in
11.8)
package="torch==${torch}+cu118"
url=https://download.pytorch.org/whl/torch_stable.html
;;
12.1)
package="torch==${torch}"
# Leave it empty to use PyPI.
url=
;;
esac
;;
2.3.*)
case ${cuda} in
11.8)
package="torch==${torch}+cu118"
url=https://download.pytorch.org/whl/torch_stable.html
;;
12.1)
package="torch==${torch}"
# Leave it empty to use PyPI.
url=
;;
esac
;;
2.4.*)
case ${cuda} in
11.8)
package="torch==${torch}+cu118"
# https://download.pytorch.org/whl/nightly/torch/
url=https://download.pytorch.org/whl/torch/
;;
12.1)
package="torch==${torch}+cu121"
url=https://download.pytorch.org/whl/torch/
;;
12.4)
package="torch==${torch}+cu124"
url=https://download.pytorch.org/whl/torch/
;;
esac
;;
2.5.*)
case ${cuda} in
11.8)
package="torch==${torch}+cu118"
# https://download.pytorch.org/whl/nightly/torch/
url=https://download.pytorch.org/whl/torch/
;;
12.1)
package="torch==${torch}+cu121"
url=https://download.pytorch.org/whl/torch/
;;
12.4)
package="torch==${torch}+cu124"
url=https://download.pytorch.org/whl/torch/
;;
esac
;;
2.6.*)
case ${cuda} in
11.8)
package="torch==${torch}+cu118"
# https://download.pytorch.org/whl/nightly/torch/
url=https://download.pytorch.org/whl/torch/
;;
12.4)
package="torch==${torch}+cu124"
url=https://download.pytorch.org/whl/torch/
;;
12.6)
package="torch==${torch}+cu126"
url=https://download.pytorch.org/whl/torch/
;;
esac
;;
2.7.*)
case ${cuda} in
11.8)
package="torch==${torch}+cu118"
# https://download.pytorch.org/whl/nightly/torch/
url=https://download.pytorch.org/whl/torch/
;;
12.6)
package="torch==${torch}+cu126"
url=https://download.pytorch.org/whl/torch/
;;
12.8)
package="torch==${torch}+cu128"
url=https://download.pytorch.org/whl/torch/
;;
esac
;;
2.8.*)
case ${cuda} in
12.6)
package="torch==${torch}+cu126"
# https://download.pytorch.org/whl/nightly/torch/
url=https://download.pytorch.org/whl/torch/
;;
12.8)
package="torch==${torch}+cu128"
url=https://download.pytorch.org/whl/torch/
;;
12.9)
package="torch==${torch}+cu129"
url=https://download.pytorch.org/whl/torch/
;;
esac
;;
*)
echo "Unsupported PyTorch version: ${torch}"
exit 1
@ -185,4 +314,6 @@ else
retry python3 -m pip install -q $package -f $url
fi
rm -rfv ~/.cache/pip
python3 -m torch.utils.collect_env
rm -rf ~/.cache/pip

View File

@ -9,6 +9,7 @@ import setuptools
from cmake.cmake_extension import BuildExtension, bdist_wheel, cmake_extension
import get_version
get_package_version = get_version.get_package_version