Mirror of https://github.com/huggingface/transformers.git, synced 2025-07-03 21:00:08 +06:00

[tests] remove pt_tf equivalence tests (#36253)

parent 1a81d774b1
commit 0863eef248
@@ -28,7 +28,6 @@ COMMON_ENV_VARIABLES = {
     "TRANSFORMERS_IS_CI": True,
     "PYTEST_TIMEOUT": 120,
     "RUN_PIPELINE_TESTS": False,
-    "RUN_PT_TF_CROSS_TESTS": False,
     "RUN_PT_FLAX_CROSS_TESTS": False,
 }
 # Disable the use of {"s": None} as the output is way too long, causing the navigation on CircleCI impractical
@@ -177,15 +176,6 @@ class CircleCIJob:
 
 
 # JOBS
-torch_and_tf_job = CircleCIJob(
-    "torch_and_tf",
-    docker_image=[{"image":"huggingface/transformers-torch-tf-light"}],
-    additional_env={"RUN_PT_TF_CROSS_TESTS": True},
-    marker="is_pt_tf_cross_test",
-    pytest_options={"rA": None, "durations": 0},
-)
-
-
 torch_and_flax_job = CircleCIJob(
     "torch_and_flax",
     additional_env={"RUN_PT_FLAX_CROSS_TESTS": True},
@@ -353,7 +343,7 @@ doc_test_job = CircleCIJob(
     pytest_num_workers=1,
 )
 
-REGULAR_TESTS = [torch_and_tf_job, torch_and_flax_job, torch_job, tf_job, flax_job, hub_job, onnx_job, tokenization_job, processor_job, generate_job, non_model_job]  # fmt: skip
+REGULAR_TESTS = [torch_and_flax_job, torch_job, tf_job, flax_job, hub_job, onnx_job, tokenization_job, processor_job, generate_job, non_model_job]  # fmt: skip
 EXAMPLES_TESTS = [examples_torch_job, examples_tensorflow_job]
 PIPELINE_TESTS = [pipelines_torch_job, pipelines_tf_job]
 REPO_UTIL_TESTS = [repo_utils_job]
@@ -22,7 +22,6 @@ env:
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
   SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   TF_FORCE_GPU_ALLOW_GROWTH: true
-  RUN_PT_TF_CROSS_TESTS: 1
   CUDA_VISIBLE_DEVICES: 0,1
 
 
.github/workflows/model_jobs.yml (1 change, vendored)

@@ -30,7 +30,6 @@ env:
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
   SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   TF_FORCE_GPU_ALLOW_GROWTH: true
-  RUN_PT_TF_CROSS_TESTS: 1
   CUDA_VISIBLE_DEVICES: 0,1
 
 jobs:

.github/workflows/model_jobs_amd.yml (1 change, vendored)

@@ -30,7 +30,6 @@ env:
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
   SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   TF_FORCE_GPU_ALLOW_GROWTH: true
-  RUN_PT_TF_CROSS_TESTS: 1
   CUDA_VISIBLE_DEVICES: 0,1
 
 jobs:
.github/workflows/push-important-models.yml (43 changes, vendored)

@@ -7,14 +7,13 @@ on:
 env:
   OUTPUT_SLACK_CHANNEL_ID: "C06L2SGMEEA"
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
   HF_HOME: /mnt/cache
   TRANSFORMERS_IS_CI: yes
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
   RUN_SLOW: yes # For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access. # This token is created under the bot `hf-transformers-bot`.
   SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   TF_FORCE_GPU_ALLOW_GROWTH: true
-  RUN_PT_TF_CROSS_TESTS: 1
 
 jobs:
   get_modified_models:
@@ -25,13 +24,13 @@ jobs:
     steps:
       - name: Check out code
        uses: actions/checkout@v4
 
       - name: Get changed files
         id: changed-files
         uses: tj-actions/changed-files@3f54ebb830831fc121d3263c1857cfbdc310cdb9 #v42
         with:
           files: src/transformers/models/**
 
       - name: Run step if only the files listed above change
         if: steps.changed-files.outputs.any_changed == 'true'
         id: set-matrix
@@ -60,41 +59,41 @@ jobs:
     if: ${{ needs.get_modified_models.outputs.matrix != '[]' && needs.get_modified_models.outputs.matrix != '' && fromJson(needs.get_modified_models.outputs.matrix)[0] != null }}
     strategy:
       fail-fast: false
       matrix:
         model-name: ${{ fromJson(needs.get_modified_models.outputs.matrix) }}
 
     steps:
       - name: Check out code
         uses: actions/checkout@v4
 
       - name: Install locally transformers & other libs
         run: |
           apt install sudo
           sudo -H pip install --upgrade pip
           sudo -H pip uninstall -y transformers
           sudo -H pip install -U -e ".[testing]"
           MAX_JOBS=4 pip install flash-attn --no-build-isolation
           pip install bitsandbytes
 
       - name: NVIDIA-SMI
         run: |
           nvidia-smi
 
       - name: Show installed libraries and their versions
         run: pip freeze
 
       - name: Run FA2 tests
         id: run_fa2_tests
         run:
           pytest -rsfE -m "flash_attn_test" --make-reports=${{ matrix.model-name }}_fa2_tests/ tests/${{ matrix.model-name }}/test_modeling_*
 
       - name: "Test suite reports artifacts: ${{ matrix.model-name }}_fa2_tests"
         if: ${{ always() }}
         uses: actions/upload-artifact@v4
         with:
           name: ${{ matrix.model-name }}_fa2_tests
           path: /transformers/reports/${{ matrix.model-name }}_fa2_tests
 
       - name: Post to Slack
         if: always()
         uses: huggingface/hf-workflows/.github/actions/post-slack@main
@@ -103,13 +102,13 @@ jobs:
           title: 🤗 Results of the FA2 tests - ${{ matrix.model-name }}
           status: ${{ steps.run_fa2_tests.conclusion}}
           slack_token: ${{ secrets.CI_SLACK_BOT_TOKEN }}
 
       - name: Run integration tests
         id: run_integration_tests
         if: always()
         run:
           pytest -rsfE -k "IntegrationTest" --make-reports=tests_integration_${{ matrix.model-name }} tests/${{ matrix.model-name }}/test_modeling_*
 
       - name: "Test suite reports artifacts: tests_integration_${{ matrix.model-name }}"
         if: ${{ always() }}
         uses: actions/upload-artifact@v4
@@ -119,7 +118,7 @@ jobs:
 
       - name: Post to Slack
         if: always()
         uses: huggingface/hf-workflows/.github/actions/post-slack@main
         with:
           slack_channel: ${{ env.OUTPUT_SLACK_CHANNEL_ID }}
           title: 🤗 Results of the Integration tests - ${{ matrix.model-name }}
.github/workflows/self-comment-ci.yml (1 change, vendored)

@@ -22,7 +22,6 @@ env:
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
   SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   TF_FORCE_GPU_ALLOW_GROWTH: true
-  RUN_PT_TF_CROSS_TESTS: 1
   CUDA_VISIBLE_DEVICES: 0,1
 
 jobs:
.github/workflows/self-push-amd.yml (1 change, vendored)

@@ -14,7 +14,6 @@ env:
   MKL_NUM_THREADS: 8
   PYTEST_TIMEOUT: 60
   TF_FORCE_GPU_ALLOW_GROWTH: true
-  RUN_PT_TF_CROSS_TESTS: 1
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
 
 jobs:
.github/workflows/self-push.yml (9 changes, vendored)

@@ -24,7 +24,6 @@ env:
   MKL_NUM_THREADS: 8
   PYTEST_TIMEOUT: 60
   TF_FORCE_GPU_ALLOW_GROWTH: true
-  RUN_PT_TF_CROSS_TESTS: 1
   CUDA_VISIBLE_DEVICES: 0,1
 
 jobs:
@@ -293,7 +292,7 @@ jobs:
 
           echo "$machine_type"
           echo "machine_type=$machine_type" >> $GITHUB_ENV
 
       - name: Update clone using environment variables
         working-directory: /transformers
         run: |
@@ -406,7 +405,7 @@ jobs:
 
           echo "$machine_type"
           echo "machine_type=$machine_type" >> $GITHUB_ENV
 
       - name: Update clone using environment variables
         working-directory: /workspace/transformers
         run: |
@@ -516,7 +515,7 @@ jobs:
 
           echo "$machine_type"
           echo "machine_type=$machine_type" >> $GITHUB_ENV
 
       - name: Update clone using environment variables
         working-directory: /workspace/transformers
         run: |
@@ -648,6 +647,6 @@ jobs:
         # `models/bert` to `models_bert` is required, as the artifact names use `_` instead of `/`.
         run: |
           pip install huggingface_hub
           pip install slack_sdk
           pip show slack_sdk
           python utils/notification_service.py "${{ needs.setup.outputs.matrix }}"
.github/workflows/self-scheduled.yml (3 changes, vendored)

@@ -40,7 +40,6 @@ env:
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
   SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   TF_FORCE_GPU_ALLOW_GROWTH: true
-  RUN_PT_TF_CROSS_TESTS: 1
   CUDA_VISIBLE_DEVICES: 0,1
   NUM_SLICES: 2
 
@@ -571,4 +570,4 @@ jobs:
     with:
       docker: ${{ inputs.docker }}
       start_sha: ${{ github.sha }}
     secrets: inherit
.github/workflows/ssh-runner.yml (19 changes, vendored)

@@ -5,7 +5,7 @@ on:
     inputs:
       runner_type:
         description: 'Type of runner to test (a10 or t4)'
         required: true
       docker_image:
         description: 'Name of the Docker image'
         required: true
@@ -15,15 +15,14 @@ on:
 
 env:
   HF_HUB_READ_TOKEN: ${{ secrets.HF_HUB_READ_TOKEN }}
   HF_HOME: /mnt/cache
   TRANSFORMERS_IS_CI: yes
   OMP_NUM_THREADS: 8
   MKL_NUM_THREADS: 8
   RUN_SLOW: yes # For gated repositories, we still need to agree to share information on the Hub repo. page in order to get access. # This token is created under the bot `hf-transformers-bot`.
   SIGOPT_API_TOKEN: ${{ secrets.SIGOPT_API_TOKEN }}
   TF_FORCE_GPU_ALLOW_GROWTH: true
   CUDA_VISIBLE_DEVICES: 0,1
-  RUN_PT_TF_CROSS_TESTS: 1
 
 jobs:
   get_runner:
@@ -78,7 +77,7 @@ jobs:
       - name: Show installed libraries and their versions
         working-directory: /transformers
         run: pip freeze
 
       - name: NVIDIA-SMI
         run: |
           nvidia-smi
@@ -344,7 +344,6 @@ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/t
 Like the slow tests, there are other environment variables available which are not enabled by default during testing:
 - `RUN_CUSTOM_TOKENIZERS`: Enables tests for custom tokenizers.
 - `RUN_PT_FLAX_CROSS_TESTS`: Enables tests for PyTorch + Flax integration.
-- `RUN_PT_TF_CROSS_TESTS`: Enables tests for TensorFlow + PyTorch integration.
 
 More environment variables and additional information can be found in the [testing_utils.py](https://github.com/huggingface/transformers/blob/main/src/transformers/testing_utils.py).
 
@@ -61,7 +61,6 @@ NOT_DEVICE_TESTS = {
     "test_load_save_without_tied_weights",
     "test_tied_weights_keys",
     "test_model_weights_reload_no_missing_tied_weights",
-    "test_pt_tf_model_equivalence",
     "test_mismatched_shapes_have_properly_initialized_weights",
     "test_matched_shapes_have_loaded_weights_when_some_mismatched_shapes_exist",
     "test_model_is_small",
@@ -85,9 +84,6 @@ warnings.simplefilter(action="ignore", category=FutureWarning)
 
 
 def pytest_configure(config):
-    config.addinivalue_line(
-        "markers", "is_pt_tf_cross_test: mark test to run only when PT and TF interactions are tested"
-    )
     config.addinivalue_line(
         "markers", "is_pt_flax_cross_test: mark test to run only when PT and FLAX interactions are tested"
     )
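For context, the `is_pt_flax_cross_test` marker that stays registered above behaves like any custom pytest marker: tests carry the mark, and a run selects them explicitly. A minimal illustrative sketch (the test name below is hypothetical and not part of this commit):

```python
import pytest


@pytest.mark.is_pt_flax_cross_test  # marker registered via config.addinivalue_line in conftest.py
def test_cross_framework_behaviour():
    # Placeholder body; real cross tests compare PyTorch and Flax outputs.
    assert True


# Such tests are then selected with: pytest -m is_pt_flax_cross_test
```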
@@ -284,7 +284,6 @@ Wie bei den langsamen Tests gibt es auch andere Umgebungsvariablen, die standard
 
 * `RUN_CUSTOM_TOKENIZERS`: Aktiviert Tests für benutzerdefinierte Tokenizer.
 * `RUN_PT_FLAX_CROSS_TESTS`: Aktiviert Tests für die Integration von PyTorch + Flax.
-* `RUN_PT_TF_CROSS_TESTS`: Aktiviert Tests für die Integration von TensorFlow + PyTorch.
 
 Weitere Umgebungsvariablen und zusätzliche Informationen finden Sie in der [testing_utils.py](src/transformers/testing_utils.py).
 
@@ -85,7 +85,7 @@ python src/transformers/commands/transformers_cli.py env
 3. 해당 기능의 사용법을 보여주는 *코드 스니펫*을 제공해 주세요.
 4. 기능과 관련된 논문이 있는 경우 링크를 포함해 주세요.
 
 이슈가 잘 작성되었다면 이슈가 생성된 순간, 이미 80% 정도의 작업이 완료된 것입니다.
 
 이슈를 제기하는 데 도움이 될 만한 [템플릿](https://github.com/huggingface/transformers/tree/main/templates)도 준비되어 있습니다.
 
@@ -140,7 +140,7 @@ python src/transformers/commands/transformers_cli.py env
 ```
 
 만약 이미 가상 환경에 🤗 Transformers가 설치되어 있다면, `-e` 플래그를 사용하여 설치하기 전에 `pip uninstall transformers`로 제거해주세요.
 
 여러분의 운영체제에 따라서, 그리고 🤗 Transformers의 선택적 의존성의 수가 증가하면서, 이 명령이 실패할 수도 있습니다. 그럴 경우 사용하려는 딥러닝 프레임워크(PyTorch, TensorFlow, 그리고/또는 Flax)를 설치한 후 아래 명령을 실행해주세요:
 
 ```bash
@@ -188,7 +188,7 @@ python src/transformers/commands/transformers_cli.py env
 이러한 검사에 대해 자세히 알아보고 관련 문제를 해결하는 방법은 [Pull Request에 대한 검사](https://huggingface.co/docs/transformers/pr_checks) 가이드를 확인하세요.
 
 만약 `docs/source` 디렉터리 아래의 문서를 수정하는 경우, 문서가 빌드될 수 있는지 확인하세요. 이 검사는 Pull Request를 열 때도 CI에서 실행됩니다. 로컬 검사를 실행하려면 문서 빌더를 설치해야 합니다:
 
 ```bash
 pip install ".[docs]"
 ```
@@ -216,7 +216,7 @@ python src/transformers/commands/transformers_cli.py env
 git fetch upstream
 git rebase upstream/main
 ```
 
 변경 사항을 브랜치에 푸시하세요:
 
 ```bash
@@ -238,7 +238,7 @@ python src/transformers/commands/transformers_cli.py env
 ☐ 새로운 기능을 추가하는 경우, 해당 기능에 대한 테스트도 추가하세요.<br>
 - 새 모델을 추가하는 경우, `ModelTester.all_model_classes = (MyModel, MyModelWithLMHead,...)`을 사용하여 일반적인 테스트를 활성화하세요.
 - 새 `@slow` 테스트를 추가하는 경우, 다음 명령으로 테스트를 통과하는지 확인하세요: `RUN_SLOW=1 python -m pytest tests/models/my_new_model/test_my_new_model.py`.
 - 새 토크나이저를 추가하는 경우, 테스트를 작성하고 다음 명령으로 테스트를 통과하는지 확인하세요: `RUN_SLOW=1 python -m pytest tests/models/{your_model_name}/test_tokenization_{your_model_name}.py`.
 - CircleCI에서는 느린 테스트를 실행하지 않지만, GitHub Actions에서는 매일 밤 실행됩니다!<br>
 
 ☐ 모든 공개 메소드는 유용한 기술문서를 가져야 합니다 (예를 들어 [`modeling_bert.py`](https://github.com/huggingface/transformers/blob/main/src/transformers/models/bert/modeling_bert.py) 참조).<br>
@@ -283,7 +283,6 @@ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/t
 느린 테스트와 마찬가지로, 다음과 같이 테스트 중에 기본적으로 활성화되지 않는 다른 환경 변수도 있습니다:
 - `RUN_CUSTOM_TOKENIZERS`: 사용자 정의 토크나이저 테스트를 활성화합니다.
 - `RUN_PT_FLAX_CROSS_TESTS`: PyTorch + Flax 통합 테스트를 활성화합니다.
-- `RUN_PT_TF_CROSS_TESTS`: TensorFlow + PyTorch 통합 테스트를 활성화합니다.
 
 더 많은 환경 변수와 추가 정보는 [testing_utils.py](src/transformers/testing_utils.py)에서 찾을 수 있습니다.
 
@@ -33,7 +33,7 @@ limitations under the License.
 * 实现新的模型。
 * 为示例或文档做贡献。
 
 如果你不知道从哪里开始，有一个特别的 [Good First Issue](https://github.com/huggingface/transformers/contribute) 列表。它会列出一些适合初学者的开放的 issues，并帮助你开始为开源项目做贡献。只需要在你想要处理的 issue 下发表评论就行。
 
 如果想要稍微更有挑战性的内容，你也可以查看 [Good Second Issue](https://github.com/huggingface/transformers/labels/Good%20Second%20Issue) 列表。总的来说，如果你觉得自己知道该怎么做，就去做吧，我们会帮助你达到目标的！🚀
 
@@ -139,7 +139,7 @@ python src/transformers/commands/transformers_cli.py env
 ```
 
 如果在虚拟环境中已经安装了 🤗 Transformers，请先使用 `pip uninstall transformers` 卸载它，然后再用 `-e` 参数以可编辑模式重新安装。
 
 根据你的操作系统，以及 Transformers 的可选依赖项数量的增加，可能会在执行此命令时出现失败。如果出现这种情况，请确保已经安装了你想使用的深度学习框架（PyTorch, TensorFlow 和 Flax），然后执行以下操作：
 
 ```bash
@@ -187,7 +187,7 @@ python src/transformers/commands/transformers_cli.py env
 想要了解有关这些检查及如何解决相关问题的更多信息，请阅读 [检查 Pull Request](https://huggingface.co/docs/transformers/pr_checks) 指南。
 
 如果你修改了 `docs/source` 目录下的文档，请确保文档仍然能够被构建。这个检查也会在你创建 PR 时在 CI 中运行。如果要进行本地检查，请确保安装了文档构建工具：
 
 ```bash
 pip install ".[docs]"
 ```
@@ -282,7 +282,6 @@ RUN_SLOW=yes python -m pytest -n auto --dist=loadfile -s -v ./examples/pytorch/t
 和时间较长的测试一样，还有其他环境变量在测试过程中，在默认情况下是未启用的：
 - `RUN_CUSTOM_TOKENIZERS`: 启用自定义分词器的测试。
 - `RUN_PT_FLAX_CROSS_TESTS`: 启用 PyTorch + Flax 整合的测试。
-- `RUN_PT_TF_CROSS_TESTS`: 启用 TensorFlow + PyTorch 整合的测试。
 
 更多环境变量和额外信息可以在 [testing_utils.py](src/transformers/testing_utils.py) 中找到。
 
setup.py (1 change)

@@ -473,7 +473,6 @@ setup(
 extras["tests_torch"] = deps_list()
 extras["tests_tf"] = deps_list()
 extras["tests_flax"] = deps_list()
-extras["tests_torch_and_tf"] = deps_list()
 extras["tests_torch_and_flax"] = deps_list()
 extras["tests_hub"] = deps_list()
 extras["tests_pipelines_torch"] = deps_list()
@@ -230,7 +230,6 @@ def parse_int_from_env(key, default=None):
 
 
 _run_slow_tests = parse_flag_from_env("RUN_SLOW", default=False)
-_run_pt_tf_cross_tests = parse_flag_from_env("RUN_PT_TF_CROSS_TESTS", default=True)
 _run_pt_flax_cross_tests = parse_flag_from_env("RUN_PT_FLAX_CROSS_TESTS", default=True)
 _run_custom_tokenizers = parse_flag_from_env("RUN_CUSTOM_TOKENIZERS", default=False)
 _run_staging = parse_flag_from_env("HUGGINGFACE_CO_STAGING", default=False)
@@ -251,25 +250,6 @@ def get_device_count():
     return num_devices
 
 
-def is_pt_tf_cross_test(test_case):
-    """
-    Decorator marking a test as a test that control interactions between PyTorch and TensorFlow.
-
-    PT+TF tests are skipped by default and we can run only them by setting RUN_PT_TF_CROSS_TESTS environment variable
-    to a truthy value and selecting the is_pt_tf_cross_test pytest mark.
-
-    """
-    if not _run_pt_tf_cross_tests or not is_torch_available() or not is_tf_available():
-        return unittest.skip(reason="test is PT+TF test")(test_case)
-    else:
-        try:
-            import pytest  # We don't need a hard dependency on pytest in the main library
-        except ImportError:
-            return test_case
-        else:
-            return pytest.mark.is_pt_tf_cross_test()(test_case)
-
-
 def is_pt_flax_cross_test(test_case):
     """
     Decorator marking a test as a test that control interactions between PyTorch and Flax
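The decorator deleted above follows the same opt-in pattern that the surviving `is_pt_flax_cross_test` uses: an environment flag decides whether a test is skipped outright or tagged with a pytest marker. A minimal sketch of that pattern, with placeholder names (`RUN_MY_CROSS_TESTS`, `my_cross_test`) that are not part of the library:

```python
import os
import unittest

import pytest


def _flag_from_env(key: str, default: bool = False) -> bool:
    # Treat "1", "true", "yes" (any casing) as enabled.
    return os.environ.get(key, str(default)).lower() in {"1", "true", "yes"}


_run_my_cross_tests = _flag_from_env("RUN_MY_CROSS_TESTS", default=False)


def my_cross_test(test_case):
    """Skip the test unless the opt-in flag is set; otherwise tag it with a marker."""
    if not _run_my_cross_tests:
        return unittest.skip(reason="test requires RUN_MY_CROSS_TESTS=1")(test_case)
    return pytest.mark.my_cross_test()(test_case)
```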
@@ -1,228 +0,0 @@
-# coding=utf-8
-# Copyright 2020 The HuggingFace Team. All rights reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-
-from __future__ import annotations
-
-import unittest
-
-from transformers import is_tf_available, is_torch_available
-from transformers.testing_utils import DUMMY_UNKNOWN_IDENTIFIER, SMALL_MODEL_IDENTIFIER, is_pt_tf_cross_test, slow
-
-
-if is_tf_available():
-    from transformers import (
-        AutoConfig,
-        BertConfig,
-        GPT2Config,
-        T5Config,
-        TFAutoModel,
-        TFAutoModelForCausalLM,
-        TFAutoModelForMaskedLM,
-        TFAutoModelForPreTraining,
-        TFAutoModelForQuestionAnswering,
-        TFAutoModelForSeq2SeqLM,
-        TFAutoModelForSequenceClassification,
-        TFAutoModelWithLMHead,
-        TFBertForMaskedLM,
-        TFBertForPreTraining,
-        TFBertForQuestionAnswering,
-        TFBertForSequenceClassification,
-        TFBertModel,
-        TFGPT2LMHeadModel,
-        TFRobertaForMaskedLM,
-        TFT5ForConditionalGeneration,
-    )
-
-if is_torch_available():
-    from transformers import (
-        AutoModel,
-        AutoModelForCausalLM,
-        AutoModelForMaskedLM,
-        AutoModelForPreTraining,
-        AutoModelForQuestionAnswering,
-        AutoModelForSeq2SeqLM,
-        AutoModelForSequenceClassification,
-        AutoModelWithLMHead,
-        BertForMaskedLM,
-        BertForPreTraining,
-        BertForQuestionAnswering,
-        BertForSequenceClassification,
-        BertModel,
-        GPT2LMHeadModel,
-        RobertaForMaskedLM,
-        T5ForConditionalGeneration,
-    )
-
-
-@is_pt_tf_cross_test
-class TFPTAutoModelTest(unittest.TestCase):
-    @slow
-    def test_model_from_pretrained(self):
-        # model_name = 'google-bert/bert-base-uncased'
-        for model_name in ["google-bert/bert-base-uncased"]:
-            config = AutoConfig.from_pretrained(model_name)
-            self.assertIsNotNone(config)
-            self.assertIsInstance(config, BertConfig)
-
-            model = TFAutoModel.from_pretrained(model_name, from_pt=True)
-            self.assertIsNotNone(model)
-            self.assertIsInstance(model, TFBertModel)
-
-            model = AutoModel.from_pretrained(model_name, from_tf=True)
-            self.assertIsNotNone(model)
-            self.assertIsInstance(model, BertModel)
-
-    @slow
-    def test_model_for_pretraining_from_pretrained(self):
-        # model_name = 'google-bert/bert-base-uncased'
-        for model_name in ["google-bert/bert-base-uncased"]:
-            config = AutoConfig.from_pretrained(model_name)
-            self.assertIsNotNone(config)
-            self.assertIsInstance(config, BertConfig)
-
-            model = TFAutoModelForPreTraining.from_pretrained(model_name, from_pt=True)
-            self.assertIsNotNone(model)
-            self.assertIsInstance(model, TFBertForPreTraining)
-
-            model = AutoModelForPreTraining.from_pretrained(model_name, from_tf=True)
-            self.assertIsNotNone(model)
-            self.assertIsInstance(model, BertForPreTraining)
-
-    @slow
-    def test_model_for_causal_lm(self):
-        model_name = "openai-community/gpt2"
-        config = AutoConfig.from_pretrained(model_name)
-        self.assertIsNotNone(config)
-        self.assertIsInstance(config, GPT2Config)
-
-        model = TFAutoModelForCausalLM.from_pretrained(model_name, from_pt=True)
-        model, loading_info = TFAutoModelForCausalLM.from_pretrained(
-            model_name, output_loading_info=True, from_pt=True
-        )
-        self.assertIsNotNone(model)
-        self.assertIsInstance(model, TFGPT2LMHeadModel)
-
-        model = AutoModelForCausalLM.from_pretrained(model_name, from_tf=True)
-        model, loading_info = AutoModelForCausalLM.from_pretrained(model_name, output_loading_info=True, from_tf=True)
-        self.assertIsNotNone(model)
-        self.assertIsInstance(model, GPT2LMHeadModel)
-
-    @slow
-    def test_lmhead_model_from_pretrained(self):
-        model_name = "google-bert/bert-base-uncased"
-        config = AutoConfig.from_pretrained(model_name)
-        self.assertIsNotNone(config)
-        self.assertIsInstance(config, BertConfig)
-
-        model = TFAutoModelWithLMHead.from_pretrained(model_name, from_pt=True)
-        self.assertIsNotNone(model)
-        self.assertIsInstance(model, TFBertForMaskedLM)
-
-        model = AutoModelWithLMHead.from_pretrained(model_name, from_tf=True)
-        self.assertIsNotNone(model)
-        self.assertIsInstance(model, BertForMaskedLM)
-
-    @slow
-    def test_model_for_masked_lm(self):
-        model_name = "google-bert/bert-base-uncased"
-        config = AutoConfig.from_pretrained(model_name)
-        self.assertIsNotNone(config)
-        self.assertIsInstance(config, BertConfig)
-
-        model = TFAutoModelForMaskedLM.from_pretrained(model_name, from_pt=True)
-        model, loading_info = TFAutoModelForMaskedLM.from_pretrained(
-            model_name, output_loading_info=True, from_pt=True
-        )
-        self.assertIsNotNone(model)
-        self.assertIsInstance(model, TFBertForMaskedLM)
-
-        model = AutoModelForMaskedLM.from_pretrained(model_name, from_tf=True)
-        model, loading_info = AutoModelForMaskedLM.from_pretrained(model_name, output_loading_info=True, from_tf=True)
-        self.assertIsNotNone(model)
-        self.assertIsInstance(model, BertForMaskedLM)
-
-    @slow
-    def test_model_for_encoder_decoder_lm(self):
-        model_name = "google-t5/t5-base"
-        config = AutoConfig.from_pretrained(model_name)
-        self.assertIsNotNone(config)
-        self.assertIsInstance(config, T5Config)
-
-        model = TFAutoModelForSeq2SeqLM.from_pretrained(model_name, from_pt=True)
-        model, loading_info = TFAutoModelForSeq2SeqLM.from_pretrained(
-            model_name, output_loading_info=True, from_pt=True
-        )
-        self.assertIsNotNone(model)
-        self.assertIsInstance(model, TFT5ForConditionalGeneration)
-
-        model = AutoModelForSeq2SeqLM.from_pretrained(model_name, from_tf=True)
-        model, loading_info = AutoModelForSeq2SeqLM.from_pretrained(model_name, output_loading_info=True, from_tf=True)
-        self.assertIsNotNone(model)
-        self.assertIsInstance(model, T5ForConditionalGeneration)
-
-    @slow
-    def test_sequence_classification_model_from_pretrained(self):
-        # model_name = 'google-bert/bert-base-uncased'
-        for model_name in ["google-bert/bert-base-uncased"]:
-            config = AutoConfig.from_pretrained(model_name)
-            self.assertIsNotNone(config)
-            self.assertIsInstance(config, BertConfig)
-
-            model = TFAutoModelForSequenceClassification.from_pretrained(model_name, from_pt=True)
-            self.assertIsNotNone(model)
-            self.assertIsInstance(model, TFBertForSequenceClassification)
-
-            model = AutoModelForSequenceClassification.from_pretrained(model_name, from_tf=True)
-            self.assertIsNotNone(model)
-            self.assertIsInstance(model, BertForSequenceClassification)
-
-    @slow
-    def test_question_answering_model_from_pretrained(self):
-        # model_name = 'google-bert/bert-base-uncased'
-        for model_name in ["google-bert/bert-base-uncased"]:
-            config = AutoConfig.from_pretrained(model_name)
-            self.assertIsNotNone(config)
-            self.assertIsInstance(config, BertConfig)
-
-            model = TFAutoModelForQuestionAnswering.from_pretrained(model_name, from_pt=True)
-            self.assertIsNotNone(model)
-            self.assertIsInstance(model, TFBertForQuestionAnswering)
-
-            model = AutoModelForQuestionAnswering.from_pretrained(model_name, from_tf=True)
-            self.assertIsNotNone(model)
-            self.assertIsInstance(model, BertForQuestionAnswering)
-
-    def test_from_pretrained_identifier(self):
-        model = TFAutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_pt=True)
-        self.assertIsInstance(model, TFBertForMaskedLM)
-        self.assertEqual(model.num_parameters(), 14410)
-        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
-
-        model = AutoModelWithLMHead.from_pretrained(SMALL_MODEL_IDENTIFIER, from_tf=True)
-        self.assertIsInstance(model, BertForMaskedLM)
-        self.assertEqual(model.num_parameters(), 14410)
-        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
-
-    def test_from_identifier_from_model_type(self):
-        model = TFAutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_pt=True)
-        self.assertIsInstance(model, TFRobertaForMaskedLM)
-        self.assertEqual(model.num_parameters(), 14410)
-        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
-
-        model = AutoModelWithLMHead.from_pretrained(DUMMY_UNKNOWN_IDENTIFIER, from_tf=True)
-        self.assertIsInstance(model, RobertaForMaskedLM)
-        self.assertEqual(model.num_parameters(), 14410)
-        self.assertEqual(model.num_parameters(only_trainable=True), 14410)
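The deleted file exercised the cross-framework loading path that `from_pt=True` / `from_tf=True` expose on the auto classes. A minimal sketch of that call pattern, for reference only (it downloads a checkpoint and needs both PyTorch and TensorFlow installed):

```python
from transformers import AutoModel, TFAutoModel

model_name = "google-bert/bert-base-uncased"

# Build a TensorFlow model from the PyTorch weights of the checkpoint ...
tf_model = TFAutoModel.from_pretrained(model_name, from_pt=True)

# ... and a PyTorch model from the TensorFlow weights.
pt_model = AutoModel.from_pretrained(model_name, from_tf=True)
```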
@@ -375,9 +375,6 @@ class BlipTextModelTest(ModelTesterMixin, unittest.TestCase):
         model = BlipTextModel.from_pretrained(model_name)
         self.assertIsNotNone(model)
 
-    def test_pt_tf_model_equivalence(self):
-        super().test_pt_tf_model_equivalence(allow_missing_keys=True)
-
 
 class BlipModelTester:
     def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
@@ -650,9 +647,6 @@ class BlipModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
             ),
         )
 
-    def test_pt_tf_model_equivalence(self):
-        super().test_pt_tf_model_equivalence(allow_missing_keys=True)
-
 
 class BlipTextRetrievalModelTester:
     def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
@@ -178,6 +178,3 @@ class BlipTextModelTest(ModelTesterMixin, unittest.TestCase):
         model_name = "Salesforce/blip-vqa-base"
         model = BlipTextModel.from_pretrained(model_name)
         self.assertIsNotNone(model)
-
-    def test_pt_tf_model_equivalence(self):
-        super().test_pt_tf_model_equivalence(allow_missing_keys=True)
@@ -321,9 +321,6 @@ class TFBlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
         model = TFBlipTextModel.from_pretrained(model_name)
         self.assertIsNotNone(model)
 
-    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
-        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
-
 
 class TFBlipModelTester:
     def __init__(self, parent, text_kwargs=None, vision_kwargs=None, is_training=True):
@@ -430,9 +427,6 @@ class TFBlipModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase
         model = TFBlipModel.from_pretrained(model_name)
         self.assertIsNotNone(model)
 
-    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
-        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
-
     @unittest.skip("Matt: Re-enable this test when we have a proper export function for TF models.")
     def test_saved_model_creation(self):
         # This fails because the if return_loss: conditional can return None or a Tensor and TF hates that.
@@ -176,6 +176,3 @@ class BlipTextModelTest(TFModelTesterMixin, unittest.TestCase):
         model_name = "Salesforce/blip-vqa-base"
         model = TFBlipTextModel.from_pretrained(model_name)
         self.assertIsNotNone(model)
-
-    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
-        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
@@ -309,10 +309,6 @@ class Data2VecVisionModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.Te
                     msg=f"Parameter {name} of model {model_class} seems not properly initialized",
                 )
 
-    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-4, name="outputs", attributes=None):
-        # We override with a slightly higher tol value, as semseg models tend to diverge a bit more
-        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes)
-
     def test_for_image_classification(self):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_for_image_classification(*config_and_inputs)
@@ -385,10 +385,6 @@ class TFData2VecVisionModelTest(TFModelTesterMixin, PipelineTesterMixin, unittes
         val_loss2 = history2.history["val_loss"][0]
         self.assertTrue(np.allclose(val_loss1, val_loss2, atol=1e-2, rtol=1e-3))
 
-    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-4, name="outputs", attributes=None):
-        # We override with a slightly higher tol value, as semseg models tend to diverge a bit more
-        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes)
-
     # Overriding this method since the base method won't be compatible with Data2VecVision.
     def test_loss_computation(self):
         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@@ -285,10 +285,6 @@ class DebertaModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase)
     def test_torch_fx(self):
         pass
 
-    @unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker")
-    def test_pt_tf_model_equivalence(self):
-        pass
-
 
 @require_torch
 @require_sentencepiece
@@ -270,10 +270,6 @@ class TFDebertaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestC
         model = TFDebertaModel.from_pretrained("kamalkraj/deberta-base")
         self.assertIsNotNone(model)
 
-    @unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker")
-    def test_pt_tf_model_equivalence(self):
-        pass
-
 
 @require_tf
 class TFDeBERTaModelIntegrationTest(unittest.TestCase):
@@ -303,10 +303,6 @@ class DebertaV2ModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCas
     def test_torch_fx(self):
         pass
 
-    @unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker")
-    def test_pt_tf_model_equivalence(self):
-        pass
-
 
 @require_torch
 @require_sentencepiece
@@ -290,10 +290,6 @@ class TFDebertaModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestC
         model = TFDebertaV2Model.from_pretrained("kamalkraj/deberta-v2-xlarge")
         self.assertIsNotNone(model)
 
-    @unittest.skip("This test was broken by the refactor in #22105, TODO @ArthurZucker")
-    def test_pt_tf_model_equivalence(self):
-        pass
-
 
 @require_tf
 class TFDeBERTaV2ModelIntegrationTest(unittest.TestCase):
@@ -16,16 +16,14 @@
 
 from __future__ import annotations
 
-import copy
 import os
 import tempfile
 import unittest
 
 import numpy as np
 
-from transformers import is_tf_available, is_torch_available
-from transformers.testing_utils import is_pt_tf_cross_test, require_tf, require_torch, slow, torch_device
-from transformers.utils.generic import ModelOutput
+from transformers import is_tf_available
+from transformers.testing_utils import require_tf, slow
 
 from ...test_modeling_tf_common import ids_tensor
 from ..bert.test_modeling_tf_bert import TFBertModelTester
@@ -35,8 +33,6 @@ from ..roberta.test_modeling_tf_roberta import TFRobertaModelTester
 
 
 if is_tf_available():
-    import tensorflow as tf
-
     from transformers import (
         AutoConfig,
         AutoTokenizer,
@@ -54,11 +50,6 @@ if is_tf_available():
     )
     from transformers.modeling_tf_outputs import TFBaseModelOutput
 
-if is_torch_available():
-    import torch
-
-    from transformers import BertLMHeadModel, BertModel, EncoderDecoderModel
-
 
 @require_tf
 class TFEncoderDecoderMixin:
@@ -386,188 +377,6 @@ class TFEncoderDecoderMixin:
         )
         self.assertEqual(tuple(generated_output.shape.as_list()), (input_ids.shape[0],) + (decoder_config.max_length,))
 
-    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
-        """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way.
-
-        Args:
-            model_class: The class of the model that is currently testing. For example, `TFBertModel`,
-                TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative
-                error messages.
-            name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc.
-            attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element
-                being a named field in the output.
-        """
-
-        self.assertEqual(type(name), str)
-        if attributes is not None:
-            self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`")
-
-        # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`).
-        if isinstance(tf_outputs, ModelOutput):
-            self.assertTrue(
-                isinstance(pt_outputs, ModelOutput),
-                f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is",
-            )
-
-            tf_keys = [k for k, v in tf_outputs.items() if v is not None]
-            pt_keys = [k for k, v in pt_outputs.items() if v is not None]
-
-            self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch")
-
-            # convert to the case of `tuple`
-            # appending each key to the current (string) `names`
-            attributes = tuple([f"{name}.{k}" for k in tf_keys])
-            self.check_pt_tf_outputs(
-                tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes
-            )
-
-        # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.)
-        elif type(tf_outputs) in [tuple, list]:
-            self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch")
-            self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch")
-
-            if attributes is not None:
-                # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`)
-                self.assertEqual(
-                    len(attributes),
-                    len(tf_outputs),
-                    f"{name}: The tuple `names` should have the same length as `tf_outputs`",
-                )
-            else:
-                # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `names`
-                attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))])
-
-            for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes):
-                self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr)
-
-        elif isinstance(tf_outputs, tf.Tensor):
-            self.assertTrue(
-                isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `tf_outputs` is"
-            )
-
-            tf_outputs = tf_outputs.numpy()
-            pt_outputs = pt_outputs.detach().to("cpu").numpy()
-
-            self.assertEqual(
-                tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch"
-            )
-
-            # deal with NumPy's scalars to make replacing nan values by 0 work.
-            if np.isscalar(tf_outputs):
-                tf_outputs = np.array([tf_outputs])
-                pt_outputs = np.array([pt_outputs])
-
-            tf_nans = np.isnan(tf_outputs)
-            pt_nans = np.isnan(pt_outputs)
-
-            pt_outputs[tf_nans] = 0
-            tf_outputs[tf_nans] = 0
-            pt_outputs[pt_nans] = 0
-            tf_outputs[pt_nans] = 0
-
-            max_diff = np.amax(np.abs(tf_outputs - pt_outputs))
-            self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).")
-        else:
-            raise ValueError(
-                "`tf_outputs` should be an instance of `tf.Tensor`, a `tuple`, or an instance of `tf.Tensor`. Got"
-                f" {type(tf_outputs)} instead."
-            )
-
-    def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict):
-        pt_inputs_dict = {}
-        for name, key in tf_inputs_dict.items():
-            if isinstance(key, bool):
-                pt_inputs_dict[name] = key
-            elif name == "input_values":
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
-            elif name == "pixel_values":
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
-            elif name == "input_features":
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
-            # other general float inputs
-            elif tf_inputs_dict[name].dtype.is_floating:
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
-            else:
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
-
-        return pt_inputs_dict
-
-    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
-        pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict)
-
-        # send pytorch inputs to the correct device
-        pt_inputs_dict = {
-            k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items()
-        }
-
-        # send pytorch model to the correct device
-        pt_model.to(torch_device)
-
-        # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
-        pt_model.eval()
-
-        with torch.no_grad():
-            pt_outputs = pt_model(**pt_inputs_dict)
-            tf_outputs = tf_model(tf_inputs_dict)
-
-        # tf models returned loss is usually a tensor rather than a scalar.
-        # (see `hf_compute_loss`: it uses `keras.losses.Reduction.NONE`)
-        # Change it here to a scalar to match PyTorch models' loss
-        tf_loss = getattr(tf_outputs, "loss", None)
-        if tf_loss is not None:
-            tf_outputs.loss = tf.math.reduce_mean(tf_loss)
-
-        self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model))
-
-    def check_pt_tf_equivalence(self, tf_model, pt_model, tf_inputs_dict):
-        """Wrap `check_pt_tf_models` to further check PT -> TF again"""
-
-        self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
-
-        # PT -> TF
-        with tempfile.TemporaryDirectory() as tmpdirname:
-            pt_model.save_pretrained(tmpdirname)
-            tf_model = TFEncoderDecoderModel.from_pretrained(tmpdirname)
-
-        self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
-
-    def check_pt_to_tf_equivalence(self, config, decoder_config, tf_inputs_dict):
-        """EncoderDecoderModel requires special way to cross load (PT -> TF)"""
-
-        encoder_decoder_config = EncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config)
-        # Output all for aggressive testing
-        encoder_decoder_config.output_hidden_states = True
-        # All models tested in this file have attentions
-        encoder_decoder_config.output_attentions = True
-
-        pt_model = EncoderDecoderModel(encoder_decoder_config)
-
-        with tempfile.TemporaryDirectory() as tmpdirname:
-            pt_model.save_pretrained(tmpdirname)
-            tf_model = TFEncoderDecoderModel.from_pretrained(tmpdirname)
-
-        self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict)
-
-    def check_tf_to_pt_equivalence(self, config, decoder_config, tf_inputs_dict):
-        """EncoderDecoderModel requires special way to cross load (TF -> PT)"""
-
-        encoder_decoder_config = EncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config)
-        # Output all for aggressive testing
-        encoder_decoder_config.output_hidden_states = True
-        # TODO: A generalizable way to determine this attribute
-        encoder_decoder_config.output_attentions = True
-
-        tf_model = TFEncoderDecoderModel(encoder_decoder_config)
-        # Make sure model is built before saving
-        tf_model(**tf_inputs_dict)
-
-        with tempfile.TemporaryDirectory() as tmpdirname:
-            # TODO Matt: PT doesn't support loading TF safetensors - remove the arg and from_tf=True when it does
-            tf_model.save_pretrained(tmpdirname, safe_serialization=False)
|
|
||||||
pt_model = EncoderDecoderModel.from_pretrained(tmpdirname, from_tf=True)
|
|
||||||
|
|
||||||
self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict)
|
|
||||||
|
|
||||||
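The numeric comparison the removed mixin relies on boils down to a NaN-tolerant max-absolute-difference check. A minimal NumPy-only sketch of the same idea (the helper name and tolerance below are illustrative, not part of the diff):

import numpy as np


def max_abs_diff(a, b):
    # Zero out positions where either side is NaN, mirroring the test above,
    # then return the largest element-wise absolute difference.
    a = np.atleast_1d(np.asarray(a, dtype=np.float64)).copy()
    b = np.atleast_1d(np.asarray(b, dtype=np.float64)).copy()
    nan_mask = np.isnan(a) | np.isnan(b)
    a[nan_mask] = 0.0
    b[nan_mask] = 0.0
    return float(np.amax(np.abs(a - b)))


# e.g. assert max_abs_diff(tf_logits.numpy(), pt_logits.detach().cpu().numpy()) <= 1e-5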
    def test_encoder_decoder_model(self):
        input_ids_dict = self.prepare_config_and_inputs()
        self.check_encoder_decoder_model(**input_ids_dict)
@ -608,70 +417,6 @@ class TFEncoderDecoderMixin:
        diff = np.abs((a - b)).max()
        self.assertLessEqual(diff, tol, f"Difference between torch and tf is {diff} (>= {tol}).")

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self):
        config_inputs_dict = self.prepare_config_and_inputs()
        labels = config_inputs_dict.pop("decoder_token_labels")

        # Keep only common arguments
        arg_names = [
            "config",
            "input_ids",
            "attention_mask",
            "decoder_config",
            "decoder_input_ids",
            "decoder_attention_mask",
            "encoder_hidden_states",
        ]
        config_inputs_dict = {k: v for k, v in config_inputs_dict.items() if k in arg_names}

        config = config_inputs_dict.pop("config")
        decoder_config = config_inputs_dict.pop("decoder_config")

        # Output all for aggressive testing
        config.output_hidden_states = True
        decoder_config.output_hidden_states = True
        # All models tested in this file have attentions
        config.output_attentions = True
        decoder_config.output_attentions = True

        tf_inputs_dict = config_inputs_dict
        # `encoder_hidden_states` is not used in model call/forward
        del tf_inputs_dict["encoder_hidden_states"]

        # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency
        # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`.
        for k in ["attention_mask", "decoder_attention_mask"]:
            attention_mask = tf_inputs_dict[k]

            # Make sure no all 0s attention masks - to avoid failure at this moment.
            # Put `1` at the beginning of sequences to make it still work when combining causal attention masks.
            # TODO: remove this line once a fix regarding large negative values for attention mask is done.
            attention_mask = tf.concat(
                [tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1
            )
            tf_inputs_dict[k] = attention_mask

        tf_inputs_dict_with_labels = copy.copy(tf_inputs_dict)
        tf_inputs_dict_with_labels["labels"] = labels

        self.assertTrue(decoder_config.cross_attention_hidden_size is None)

        # Original test: check without `labels` and without `enc_to_dec_proj` projection
        self.assertTrue(config.hidden_size == decoder_config.hidden_size)
        self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict)
        self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict)

        # check with `labels`
        self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict_with_labels)
        self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict_with_labels)

        # check `enc_to_dec_proj` work as expected
        decoder_config.hidden_size = decoder_config.hidden_size * 2
        self.assertTrue(config.hidden_size != decoder_config.hidden_size)
        self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict)
        self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict)
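The attention-mask workaround in the removed test simply forces the first position of every sequence to be attended so that no mask is all zeros. A hedged NumPy sketch of that idea (the helper name and example values are made up for illustration):

import numpy as np


def force_non_null_mask(attention_mask):
    # Set the first token of every row to 1 so no sequence has an all-zero mask.
    mask = np.array(attention_mask, copy=True)
    mask[:, 0] = 1
    return mask


print(force_non_null_mask(np.array([[0, 0, 0], [1, 1, 0]])))  # [[1 0 0] [1 1 0]]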
    def test_model_save_load_from_pretrained(self):
        model_2 = self.get_pretrained_model()
        input_ids = ids_tensor([13, 5], model_2.config.encoder.vocab_size)
@ -761,44 +506,6 @@ class TFBertEncoderDecoderModelTest(TFEncoderDecoderMixin, unittest.TestCase):
            "labels": decoder_token_labels,
        }

    @slow
    @is_pt_tf_cross_test
    def test_bert2bert_summarization(self):
        from transformers import EncoderDecoderModel

        tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")

        """Not working, because pt checkpoint has `encoder.encoder.layer...` while tf model has `encoder.bert.encoder.layer...`.
        (For Bert decoder, there is no issue, because `BertModel` is wrapped into `decoder` as `bert`)
        model = TFEncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16", from_pt=True)
        """

        # workaround to load from pt
        _model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2bert-cnn_dailymail-fp16")
        _model.encoder.save_pretrained("./encoder")
        _model.decoder.save_pretrained("./decoder")
        model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(
            "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True
        )
        model.config = _model.config
ARTICLE_STUDENTS = """(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which "369 went to war for the Confederate States and seven for the Union Army," the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 "colonies" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for "health and safety incidents" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. "The media has labeled us as the 'nation's deadliest fraternity,' " Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. "As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world," Cohen said. Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents."""
EXPECTED_SUMMARY_STUDENTS = """sae was founded in 1856, five years before the civil war. the fraternity has had to work hard to change recently. the university of oklahoma president says the university's affiliation with the fraternity is permanently done. the sae has had a string of members in recent months."""
        input_dict = tokenizer(ARTICLE_STUDENTS, return_tensors="tf")
        output_ids = model.generate(input_ids=input_dict["input_ids"]).numpy().tolist()
        summary = tokenizer.batch_decode(output_ids, skip_special_tokens=True)

        self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS])

        # Test with the TF checkpoint
        model = TFEncoderDecoderModel.from_pretrained("ydshieh/bert2bert-cnn_dailymail-fp16")

        output_ids = model.generate(input_ids=input_dict["input_ids"]).numpy().tolist()
        summary = tokenizer.batch_decode(output_ids, skip_special_tokens=True)

        self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS])
@require_tf
class TFGPT2EncoderDecoderModelTest(TFEncoderDecoderMixin, unittest.TestCase):
@ -861,37 +568,6 @@ class TFGPT2EncoderDecoderModelTest(TFEncoderDecoderMixin, unittest.TestCase):
            "labels": decoder_token_labels,
        }

    @slow
    @is_pt_tf_cross_test
    def test_bert2gpt2_summarization(self):
        from transformers import EncoderDecoderModel

        tokenizer_in = AutoTokenizer.from_pretrained("google-bert/bert-base-cased")
        tokenizer_out = AutoTokenizer.from_pretrained("openai-community/gpt2")

        """Not working, because pt checkpoint has `encoder.encoder.layer...` while tf model has `encoder.bert.encoder.layer...`.
        (For GPT2 decoder, there is no issue)
        model = TFEncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16", from_pt=True)
        """

        # workaround to load from pt
        _model = EncoderDecoderModel.from_pretrained("patrickvonplaten/bert2gpt2-cnn_dailymail-fp16")
        _model.encoder.save_pretrained("./encoder")
        _model.decoder.save_pretrained("./decoder")
        model = TFEncoderDecoderModel.from_encoder_decoder_pretrained(
            "./encoder", "./decoder", encoder_from_pt=True, decoder_from_pt=True
        )
        model.config = _model.config
ARTICLE_STUDENTS = """(CNN)Sigma Alpha Epsilon is under fire for a video showing party-bound fraternity members singing a racist chant. SAE's national chapter suspended the students, but University of Oklahoma President David Boren took it a step further, saying the university's affiliation with the fraternity is permanently done. The news is shocking, but it's not the first time SAE has faced controversy. SAE was founded March 9, 1856, at the University of Alabama, five years before the American Civil War, according to the fraternity website. When the war began, the group had fewer than 400 members, of which "369 went to war for the Confederate States and seven for the Union Army," the website says. The fraternity now boasts more than 200,000 living alumni, along with about 15,000 undergraduates populating 219 chapters and 20 "colonies" seeking full membership at universities. SAE has had to work hard to change recently after a string of member deaths, many blamed on the hazing of new recruits, SAE national President Bradley Cohen wrote in a message on the fraternity's website. The fraternity's website lists more than 130 chapters cited or suspended for "health and safety incidents" since 2010. At least 30 of the incidents involved hazing, and dozens more involved alcohol. However, the list is missing numerous incidents from recent months. Among them, according to various media outlets: Yale University banned the SAEs from campus activities last month after members allegedly tried to interfere with a sexual misconduct investigation connected to an initiation rite. Stanford University in December suspended SAE housing privileges after finding sorority members attending a fraternity function were subjected to graphic sexual content. And Johns Hopkins University in November suspended the fraternity for underage drinking. "The media has labeled us as the 'nation's deadliest fraternity,' " Cohen said. In 2011, for example, a student died while being coerced into excessive alcohol consumption, according to a lawsuit. SAE's previous insurer dumped the fraternity. "As a result, we are paying Lloyd's of London the highest insurance rates in the Greek-letter world," Cohen said. Universities have turned down SAE's attempts to open new chapters, and the fraternity had to close 12 in 18 months over hazing incidents."""
EXPECTED_SUMMARY_STUDENTS = """SAS Alpha Epsilon suspended the students, but university president says it's permanent.\nThe fraternity has had to deal with a string of student deaths since 2010.\nSAS has more than 200,000 members, many of whom are students.\nA student died while being forced into excessive alcohol consumption."""
        input_dict = tokenizer_in(ARTICLE_STUDENTS, return_tensors="tf")
        output_ids = model.generate(input_ids=input_dict["input_ids"]).numpy().tolist()
        summary = tokenizer_out.batch_decode(output_ids, skip_special_tokens=True)

        self.assertEqual(summary, [EXPECTED_SUMMARY_STUDENTS])
@require_tf
class TFRoBertaEncoderDecoderModelTest(TFEncoderDecoderMixin, unittest.TestCase):
@ -1113,54 +789,6 @@ class TFEncoderDecoderModelSaveLoadTests(unittest.TestCase):
        max_diff = np.max(np.abs(logits_2.numpy() - logits_orig.numpy()))
        self.assertAlmostEqual(max_diff, 0.0, places=4)

    @require_torch
    @is_pt_tf_cross_test
    def test_encoder_decoder_save_load_from_encoder_decoder_from_pt(self):
        config = self.get_encoder_decoder_config_small()

        # create two random BERT models for bert2bert & initialize weights (+cross_attention weights)
        encoder_pt = BertModel(config.encoder).to(torch_device).eval()
        decoder_pt = BertLMHeadModel(config.decoder).to(torch_device).eval()

        encoder_decoder_pt = EncoderDecoderModel(encoder=encoder_pt, decoder=decoder_pt).to(torch_device).eval()

        input_ids = ids_tensor([13, 5], encoder_pt.config.vocab_size)
        decoder_input_ids = ids_tensor([13, 1], decoder_pt.config.vocab_size)

        pt_input_ids = torch.tensor(input_ids.numpy(), device=torch_device, dtype=torch.long)
        pt_decoder_input_ids = torch.tensor(decoder_input_ids.numpy(), device=torch_device, dtype=torch.long)

        logits_pt = encoder_decoder_pt(input_ids=pt_input_ids, decoder_input_ids=pt_decoder_input_ids).logits

        # PyTorch => TensorFlow
        with tempfile.TemporaryDirectory() as tmp_dirname_1, tempfile.TemporaryDirectory() as tmp_dirname_2:
            encoder_decoder_pt.encoder.save_pretrained(tmp_dirname_1)
            encoder_decoder_pt.decoder.save_pretrained(tmp_dirname_2)
            encoder_decoder_tf = TFEncoderDecoderModel.from_encoder_decoder_pretrained(tmp_dirname_1, tmp_dirname_2)

        logits_tf = encoder_decoder_tf(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

        max_diff = np.max(np.abs(logits_pt.detach().cpu().numpy() - logits_tf.numpy()))
        self.assertAlmostEqual(max_diff, 0.0, places=3)

        # Make sure `from_pretrained` following `save_pretrained` work and give the same result
        with tempfile.TemporaryDirectory() as tmp_dirname:
            encoder_decoder_tf.save_pretrained(tmp_dirname)
            encoder_decoder_tf = TFEncoderDecoderModel.from_pretrained(tmp_dirname)

            logits_tf_2 = encoder_decoder_tf(input_ids=input_ids, decoder_input_ids=decoder_input_ids).logits

            max_diff = np.max(np.abs(logits_tf_2.numpy() - logits_tf.numpy()))
            self.assertAlmostEqual(max_diff, 0.0, places=3)

        # TensorFlow => PyTorch
        with tempfile.TemporaryDirectory() as tmp_dirname:
            encoder_decoder_tf.save_pretrained(tmp_dirname, safe_serialization=False)
            encoder_decoder_pt = EncoderDecoderModel.from_pretrained(tmp_dirname, from_tf=True)

        max_diff = np.max(np.abs(logits_pt.detach().cpu().numpy() - logits_tf.numpy()))
        self.assertAlmostEqual(max_diff, 0.0, places=3)

    @slow
    def test_encoder_decoder_from_pretrained(self):
        load_weight_prefix = TFEncoderDecoderModel.load_weight_prefix
@ -24,7 +24,7 @@ import numpy as np
import requests

from transformers import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
from transformers.testing_utils import is_flaky, is_pt_tf_cross_test, require_torch, require_vision, slow, torch_device
from transformers.testing_utils import is_flaky, require_torch, require_vision, slow, torch_device
from transformers.utils import is_torch_available, is_vision_available

from ...test_configuration_common import ConfigTester
@ -166,18 +166,6 @@ class GroupViTVisionModelTest(ModelTesterMixin, unittest.TestCase):
|
|||||||
def test_batching_equivalence(self):
|
def test_batching_equivalence(self):
|
||||||
super().test_batching_equivalence()
|
super().test_batching_equivalence()
|
||||||
|
|
||||||
@is_pt_tf_cross_test
|
|
||||||
def test_pt_tf_model_equivalence(self):
|
|
||||||
import tensorflow as tf
|
|
||||||
|
|
||||||
seed = 338
|
|
||||||
random.seed(seed)
|
|
||||||
np.random.seed(seed)
|
|
||||||
torch.manual_seed(seed)
|
|
||||||
torch.cuda.manual_seed_all(seed)
|
|
||||||
tf.random.set_seed(seed)
|
|
||||||
return super().test_pt_tf_model_equivalence()
|
|
||||||
|
|
||||||
def test_model_get_set_embeddings(self):
|
def test_model_get_set_embeddings(self):
|
||||||
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
|
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
|
||||||
|
|
||||||
@ -595,22 +583,6 @@ class GroupViTModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase
    def test_model_get_set_embeddings(self):
        pass

    # overwritten from parent as this equivalent test needs a specific `seed` and hard to get a good one!
    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-5, name="outputs", attributes=None):
        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol=tol, name=name, attributes=attributes)

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self):
        import tensorflow as tf

        seed = 163
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        tf.random.set_seed(seed)
        return super().test_pt_tf_model_equivalence()

    # override as the `logit_scale` parameter initilization is different for GROUPVIT
    def test_initialization(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
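The removed GroupViT overrides above only pin every random number generator before delegating to the shared equivalence test. A small hedged helper doing the same thing (the function name is illustrative, and all four libraries must be installed):

import random

import numpy as np
import tensorflow as tf
import torch


def set_all_seeds(seed):
    # Seed Python, NumPy, PyTorch (CPU and CUDA) and TensorFlow together.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    tf.random.set_seed(seed)


# set_all_seeds(338)  # e.g. before an argmax-sensitive cross-framework comparison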
@ -23,12 +23,10 @@ import tempfile
import unittest
from importlib import import_module

import numpy as np
import requests

from transformers import GroupViTConfig, GroupViTTextConfig, GroupViTVisionConfig
from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tensorflow_probability,
    require_tf,
    require_vision,
@ -149,10 +147,6 @@ class TFGroupViTVisionModelTest(TFModelTesterMixin, unittest.TestCase):
    test_head_masking = False
    test_onnx = False

    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None):
        # We override with a slightly higher tol value, as this model tends to diverge a bit more
        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes)

    def setUp(self):
        self.model_tester = TFGroupViTVisionModelTester(self)
        self.config_tester = ConfigTester(
@ -291,25 +285,6 @@ class TFGroupViTVisionModelTest(TFModelTesterMixin, unittest.TestCase):
        check_hidden_states_output(inputs_dict, config, model_class)

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self):
        # `GroupViT` computes some indices using argmax, uses them as
        # one-hot encoding for further computation. The problem is
        # while PT/TF have very small difference in `y_soft` (~ 1e-9),
        # the argmax could be totally different, if there are at least
        # 2 indices with almost identical values. This leads to very
        # large difference in the outputs. We need specific seeds to
        # avoid almost identical values happening in `y_soft`.
        import torch

        seed = 338
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        tf.random.set_seed(seed)
        return super().test_pt_tf_model_equivalence()

    @slow
    def test_model_from_pretrained(self):
        model_name = "nvidia/groupvit-gcc-yfcc"
@ -462,10 +437,6 @@ class TFGroupViTTextModelTest(TFModelTesterMixin, unittest.TestCase):
    test_head_masking = False
    test_onnx = False

    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None):
        # We override with a slightly higher tol value, as this model tends to diverge a bit more
        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes)

    def setUp(self):
        self.model_tester = TFGroupViTTextModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GroupViTTextConfig, hidden_size=37)
@ -588,10 +559,6 @@ class TFGroupViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.Test
    test_attention_outputs = False
    test_onnx = False

    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None):
        # We override with a slightly higher tol value, as this model tends to diverge a bit more
        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes)

    def setUp(self):
        self.model_tester = TFGroupViTModelTester(self)
@ -616,25 +583,6 @@ class TFGroupViTModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.Test
    def test_keras_fit(self):
        super().test_keras_fit()

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self):
        # `GroupViT` computes some indices using argmax, uses them as
        # one-hot encoding for further computation. The problem is
        # while PT/TF have very small difference in `y_soft` (~ 1e-9),
        # the argmax could be totally different, if there are at least
        # 2 indices with almost identical values. This leads to very
        # large difference in the outputs. We need specific seeds to
        # avoid almost identical values happening in `y_soft`.
        import torch

        seed = 158
        random.seed(seed)
        np.random.seed(seed)
        torch.manual_seed(seed)
        torch.cuda.manual_seed_all(seed)
        tf.random.set_seed(seed)
        return super().test_pt_tf_model_equivalence()

    # overwrite from common since `TFGroupViTModelTester` set `return_loss` to `True` and causes the preparation of
    # `symbolic_inputs` failed.
    def test_keras_save_load(self):
@ -19,15 +19,13 @@ from __future__ import annotations
import copy
import inspect
import math
import os
import tempfile
import unittest

import numpy as np
import pytest

from transformers import is_tf_available
from transformers.testing_utils import is_pt_tf_cross_test, require_soundfile, require_tf, slow
from transformers.testing_utils import require_soundfile, require_tf, slow

from ...test_configuration_common import ConfigTester
from ...test_modeling_tf_common import TFModelTesterMixin, ids_tensor
@ -337,62 +335,6 @@ class TFHubertModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCa
        # TODO: (Amy) - check whether skipping CTC model resolves this issue and possible resolutions for CTC
        pass

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self, allow_missing_keys=False):
        # We override the base test here to skip loss calculation for Hubert models because the loss is massive with
        # the default labels and frequently overflows to inf or exceeds numerical tolerances between TF/PT
        import torch

        import transformers

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

            # Output all for aggressive testing
            config.output_hidden_states = True
            config.output_attentions = self.has_attentions

            # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency
            # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`.
            # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it.
            self._make_attention_mask_non_null(inputs_dict)

            pt_model_class_name = model_class.__name__[2:]  # Skip the "TF" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)

            tf_model = model_class(config)
            pt_model = pt_model_class(config)

            tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class)

            # Check we can load pt model in tf and vice-versa with model => model functions
            tf_model = transformers.load_pytorch_model_in_tf2_model(
                tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys
            )
            pt_model = transformers.load_tf2_model_in_pytorch_model(
                pt_model, tf_model, allow_missing_keys=allow_missing_keys
            )

            # Original test: check without `labels`
            self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

            # Check we can load pt model in tf and vice-versa with checkpoint => model functions
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
                torch.save(pt_model.state_dict(), pt_checkpoint_path)
                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(
                    tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys
                )

                tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
                tf_model.save_weights(tf_checkpoint_path)
                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(
                    pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys
                )

            # Original test: check without `labels`
            self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)


@require_tf
class TFHubertRobustModelTest(TFModelTesterMixin, unittest.TestCase):
@ -518,62 +460,6 @@ class TFHubertRobustModelTest(TFModelTesterMixin, unittest.TestCase):
        # TODO: (Amy) - check whether skipping CTC model resolves this issue and possible resolutions for CTC
        pass

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self, allow_missing_keys=False):
        # We override the base test here to skip loss calculation for Hubert models because the loss is massive with
        # the default labels and frequently overflows to inf or exceeds numerical tolerances between TF/PT
        import torch

        import transformers

        for model_class in self.all_model_classes:
            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

            # Output all for aggressive testing
            config.output_hidden_states = True
            config.output_attentions = self.has_attentions

            # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency
            # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`.
            # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it.
            self._make_attention_mask_non_null(inputs_dict)

            pt_model_class_name = model_class.__name__[2:]  # Skip the "TF" at the beginning
            pt_model_class = getattr(transformers, pt_model_class_name)

            tf_model = model_class(config)
            pt_model = pt_model_class(config)

            tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class)

            # Check we can load pt model in tf and vice-versa with model => model functions
            tf_model = transformers.load_pytorch_model_in_tf2_model(
                tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys
            )
            pt_model = transformers.load_tf2_model_in_pytorch_model(
                pt_model, tf_model, allow_missing_keys=allow_missing_keys
            )

            # Original test: check without `labels`
            self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)

            # Check we can load pt model in tf and vice-versa with checkpoint => model functions
            with tempfile.TemporaryDirectory() as tmpdirname:
                pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
                torch.save(pt_model.state_dict(), pt_checkpoint_path)
                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(
                    tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys
                )

                tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
                tf_model.save_weights(tf_checkpoint_path)
                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(
                    pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys
                )

            # Original test: check without `labels`
            self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)


@require_tf
class TFHubertUtilsTest(unittest.TestCase):
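The Hubert overrides above rely on the naming convention that a TF model class maps to its PyTorch counterpart by dropping the leading "TF". A hedged sketch of that lookup (the helper name is illustrative):

import transformers


def pt_class_for_tf(tf_model_class):
    # TFHubertModel -> "HubertModel" -> transformers.HubertModel
    pt_class_name = tf_model_class.__name__[2:]  # drop the leading "TF"
    return getattr(transformers, pt_class_name)


# e.g. pt_class_for_tf(transformers.TFHubertModel) is transformers.HubertModel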
@ -23,7 +23,6 @@ from parameterized import parameterized
from transformers import BitsAndBytesConfig, IdeficsConfig, is_torch_available, is_vision_available
from transformers.testing_utils import (
    TestCasePlus,
    is_pt_tf_cross_test,
    require_bitsandbytes,
    require_torch,
    require_torch_sdpa,
@ -574,11 +573,6 @@ class IdeficsModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase)
        check_hidden_states_output(inputs_dict, config, model_class)

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self, allow_missing_keys=False):
        self.has_attentions = False
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)

    @slow
    def test_model_from_pretrained(self):
        model_name = "HuggingFaceM4/idefics-9b"
@ -20,7 +20,7 @@ import unittest
from importlib import import_module

from transformers import IdeficsConfig, is_tf_available, is_vision_available
from transformers.testing_utils import TestCasePlus, is_pt_tf_cross_test, require_tf, require_vision, slow
from transformers.testing_utils import TestCasePlus, require_tf, require_vision, slow
from transformers.utils import cached_property

from ...test_configuration_common import ConfigTester
@ -420,11 +420,6 @@ class TFIdeficsModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestC
        check_hidden_states_output(inputs_dict, config, model_class)

    @is_pt_tf_cross_test
    def test_pt_tf_model_equivalence(self, allow_missing_keys=False):
        self.has_attentions = False
        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)

    def test_keras_save_load(self):
        config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
@ -41,7 +41,6 @@ from transformers.models.layoutlmv2.tokenization_layoutlmv2 import (
    _is_whitespace,
)
from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_detectron2,
    require_pandas,
    require_tokenizers,
@ -1497,48 +1496,6 @@ class LayoutLMv2TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
        self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
        self.assertLessEqual(len(new_encoded_inputs), 20)

    @is_pt_tf_cross_test
    def test_batch_encode_plus_tensors(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes_batch()

                # A Tensor cannot be build by sequences which are not the same size
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="pt")
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="tf")

                if tokenizer.pad_token_id is None:
                    self.assertRaises(
                        ValueError,
                        tokenizer.batch_encode_plus,
                        words,
                        boxes=boxes,
                        padding=True,
                        return_tensors="pt",
                    )
                    self.assertRaises(
                        ValueError,
                        tokenizer.batch_encode_plus,
                        words,
                        boxes=boxes,
                        padding="longest",
                        return_tensors="tf",
                    )
                else:
                    pytorch_tensor = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True, return_tensors="pt")
                    tensorflow_tensor = tokenizer.batch_encode_plus(
                        words, boxes=boxes, padding="longest", return_tensors="tf"
                    )
                    encoded_sequences = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True)

                    for key in encoded_sequences.keys():
                        pytorch_value = pytorch_tensor[key].tolist()
                        tensorflow_value = tensorflow_tensor[key].numpy().tolist()
                        encoded_value = encoded_sequences[key]

                        self.assertEqual(pytorch_value, tensorflow_value, encoded_value)

    def test_sequence_ids(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
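Stripped of the LayoutLM-specific boxes, the equivalence being exercised above is that a tokenizer returns the same integer batches for both return_tensors backends. A hedged sketch (the checkpoint is only an example, and both PyTorch and TensorFlow must be installed):

from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained("google-bert/bert-base-uncased")
batch = ["a short sentence", "a slightly longer second sentence"]

pt_batch = tokenizer(batch, padding=True, return_tensors="pt")
tf_batch = tokenizer(batch, padding=True, return_tensors="tf")

for key in pt_batch:
    # Both backends should produce identical token ids, attention masks and type ids.
    assert pt_batch[key].tolist() == tf_batch[key].numpy().tolist(), key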
@ -34,7 +34,6 @@ from transformers import (
)
from transformers.models.layoutlmv3.tokenization_layoutlmv3 import VOCAB_FILES_NAMES, LayoutLMv3Tokenizer
from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_pandas,
    require_tf,
    require_tokenizers,
@ -1375,48 +1374,6 @@ class LayoutLMv3TokenizationTest(TokenizerTesterMixin, unittest.TestCase):
        self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
        self.assertLessEqual(len(new_encoded_inputs), 20)

    @is_pt_tf_cross_test
    def test_batch_encode_plus_tensors(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes_batch()

                # A Tensor cannot be build by sequences which are not the same size
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="pt")
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="tf")

                if tokenizer.pad_token_id is None:
                    self.assertRaises(
                        ValueError,
                        tokenizer.batch_encode_plus,
                        words,
                        boxes=boxes,
                        padding=True,
                        return_tensors="pt",
                    )
                    self.assertRaises(
                        ValueError,
                        tokenizer.batch_encode_plus,
                        words,
                        boxes=boxes,
                        padding="longest",
                        return_tensors="tf",
                    )
                else:
                    pytorch_tensor = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True, return_tensors="pt")
                    tensorflow_tensor = tokenizer.batch_encode_plus(
                        words, boxes=boxes, padding="longest", return_tensors="tf"
                    )
                    encoded_sequences = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True)

                    for key in encoded_sequences.keys():
                        pytorch_value = pytorch_tensor[key].tolist()
                        tensorflow_value = tensorflow_tensor[key].numpy().tolist()
                        encoded_value = encoded_sequences[key]

                        self.assertEqual(pytorch_value, tensorflow_value, encoded_value)

    def test_sequence_ids(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
@ -32,7 +32,6 @@ from transformers import (
from transformers.models.layoutxlm.tokenization_layoutxlm import LayoutXLMTokenizer
from transformers.testing_utils import (
    get_tests_dir,
    is_pt_tf_cross_test,
    require_pandas,
    require_sentencepiece,
    require_tokenizers,
@ -1426,48 +1425,6 @@ class LayoutXLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
        self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
        self.assertLessEqual(len(new_encoded_inputs), 20)

    @is_pt_tf_cross_test
    def test_batch_encode_plus_tensors(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                words, boxes = self.get_words_and_boxes_batch()

                # A Tensor cannot be build by sequences which are not the same size
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="pt")
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, words, boxes=boxes, return_tensors="tf")

                if tokenizer.pad_token_id is None:
                    self.assertRaises(
                        ValueError,
                        tokenizer.batch_encode_plus,
                        words,
                        boxes=boxes,
                        padding=True,
                        return_tensors="pt",
                    )
                    self.assertRaises(
                        ValueError,
                        tokenizer.batch_encode_plus,
                        words,
                        boxes=boxes,
                        padding="longest",
                        return_tensors="tf",
                    )
                else:
                    pytorch_tensor = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True, return_tensors="pt")
                    tensorflow_tensor = tokenizer.batch_encode_plus(
                        words, boxes=boxes, padding="longest", return_tensors="tf"
                    )
                    encoded_sequences = tokenizer.batch_encode_plus(words, boxes=boxes, padding=True)

                    for key in encoded_sequences.keys():
                        pytorch_value = pytorch_tensor[key].tolist()
                        tensorflow_value = tensorflow_tensor[key].numpy().tolist()
                        encoded_value = encoded_sequences[key]

                        self.assertEqual(pytorch_value, tensorflow_value, encoded_value)

    def test_sequence_ids(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
@ -33,7 +33,7 @@ from transformers import (
    logging,
)
from transformers.models.markuplm.tokenization_markuplm import VOCAB_FILES_NAMES, MarkupLMTokenizer
from transformers.testing_utils import is_pt_tf_cross_test, require_tokenizers, require_torch, slow
from transformers.testing_utils import require_tokenizers, require_torch, slow

from ...test_tokenization_common import SMALL_TRAINING_CORPUS, TokenizerTesterMixin, merge_model_tokenizer_mappings
@ -1258,50 +1258,6 @@ class MarkupLMTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
        self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
        self.assertLessEqual(len(new_encoded_inputs), 20)

    @is_pt_tf_cross_test
    def test_batch_encode_plus_tensors(self):
        tokenizers = self.get_tokenizers(do_lower_case=False)
        for tokenizer in tokenizers:
            with self.subTest(f"{tokenizer.__class__.__name__}"):
                nodes, xpaths = self.get_nodes_and_xpaths_batch()

                # A Tensor cannot be build by sequences which are not the same size
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, nodes, xpaths=xpaths, return_tensors="pt")
                self.assertRaises(ValueError, tokenizer.batch_encode_plus, nodes, xpaths=xpaths, return_tensors="tf")

                if tokenizer.pad_token_id is None:
                    self.assertRaises(
                        ValueError,
                        tokenizer.batch_encode_plus,
                        nodes,
                        xpaths=xpaths,
                        padding=True,
                        return_tensors="pt",
                    )
                    self.assertRaises(
                        ValueError,
                        tokenizer.batch_encode_plus,
                        nodes,
                        xpaths=xpaths,
                        padding="longest",
                        return_tensors="tf",
                    )
                else:
                    pytorch_tensor = tokenizer.batch_encode_plus(
                        nodes, xpaths=xpaths, padding=True, return_tensors="pt"
                    )
                    tensorflow_tensor = tokenizer.batch_encode_plus(
                        nodes, xpaths=xpaths, padding="longest", return_tensors="tf"
                    )
                    encoded_sequences = tokenizer.batch_encode_plus(nodes, xpaths=xpaths, padding=True)

                    for key in encoded_sequences.keys():
                        pytorch_value = pytorch_tensor[key].tolist()
                        tensorflow_value = tensorflow_tensor[key].numpy().tolist()
                        encoded_value = encoded_sequences[key]

                        self.assertEqual(pytorch_value, tensorflow_value, encoded_value)

    def test_sequence_ids(self):
        tokenizers = self.get_tokenizers()
        for tokenizer in tokenizers:
@ -433,10 +433,6 @@ class SamModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
    def test_hidden_states_output(self):
        pass

    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None):
        # Use a slightly higher default tol to make the tests non-flaky
        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol=tol, name=name, attributes=attributes)

    @slow
    def test_model_from_pretrained(self):
        model_name = "facebook/sam-vit-huge"
@ -411,16 +411,6 @@ class TFSamModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCase)
        model = TFSamModel.from_pretrained("facebook/sam-vit-base")  # sam-vit-huge blows out our memory
        self.assertIsNotNone(model)

    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-4, name="outputs", attributes=None):
        super().check_pt_tf_outputs(
            tf_outputs=tf_outputs,
            pt_outputs=pt_outputs,
            model_class=model_class,
            tol=tol,
            name=name,
            attributes=attributes,
        )


def prepare_image():
    img_url = "https://huggingface.co/ybelkada/segment-anything/resolve/main/assets/car.png"
@ -18,7 +18,6 @@ import unittest
import numpy as np

from transformers.testing_utils import (
    is_pt_tf_cross_test,
    require_tf,
    require_torch,
    require_torchvision,
@ -340,42 +339,3 @@ class SamProcessorEquivalenceTest(unittest.TestCase):
    def prepare_image_inputs(self):
        """This function prepares a list of PIL images."""
        return prepare_image_inputs()

    @is_pt_tf_cross_test
    def test_post_process_masks_equivalence(self):
        image_processor = self.get_image_processor()

        processor = SamProcessor(image_processor=image_processor)
        dummy_masks = np.random.randint(0, 2, size=(1, 3, 5, 5)).astype(np.float32)
        tf_dummy_masks = [tf.convert_to_tensor(dummy_masks)]
        pt_dummy_masks = [torch.tensor(dummy_masks)]

        original_sizes = [[1764, 2646]]

        reshaped_input_size = [[683, 1024]]
        tf_masks = processor.post_process_masks(
            tf_dummy_masks, original_sizes, reshaped_input_size, return_tensors="tf"
        )
        pt_masks = processor.post_process_masks(
            pt_dummy_masks, original_sizes, reshaped_input_size, return_tensors="pt"
        )

        self.assertTrue(np.all(tf_masks[0].numpy() == pt_masks[0].numpy()))

    @is_pt_tf_cross_test
    def test_image_processor_equivalence(self):
        image_processor = self.get_image_processor()

        processor = SamProcessor(image_processor=image_processor)

        image_input = self.prepare_image_inputs()

        pt_input_feat_extract = image_processor(image_input, return_tensors="pt")["pixel_values"].numpy()
        pt_input_processor = processor(images=image_input, return_tensors="pt")["pixel_values"].numpy()

        tf_input_feat_extract = image_processor(image_input, return_tensors="tf")["pixel_values"].numpy()
        tf_input_processor = processor(images=image_input, return_tensors="tf")["pixel_values"].numpy()

        self.assertTrue(np.allclose(pt_input_feat_extract, pt_input_processor))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_feat_extract))
        self.assertTrue(np.allclose(pt_input_feat_extract, tf_input_processor))
@@ -431,10 +431,6 @@ class TFSegformerModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.Tes
             model = model_class(config)
             apply(model)

-    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=2e-4, name="outputs", attributes=None):
-        # We override with a slightly higher tol value, as semseg models tend to diverge a bit more
-        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes)
-
     @slow
     def test_model_from_pretrained(self):
         model_name = "nvidia/segformer-b0-finetuned-ade-512-512"

@@ -693,10 +693,6 @@ class Speech2TextModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTest

         self.assertTrue(models_equal)

-    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
-        # Allow missing keys since TF doesn't cache the sinusoidal embeddings in an attribute
-        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
-
     @unittest.skip(reason="Test failing, @RocketNight is looking into it")
     def test_tf_from_pt_safetensors(self):
         pass

@@ -434,10 +434,6 @@ class TFSpeech2TextModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.T
            ]
            self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)

-    def test_pt_tf_model_equivalence(self, allow_missing_keys=True):
-        # Allow missing keys since TF doesn't cache the sinusoidal embeddings in an attribute
-        super().test_pt_tf_model_equivalence(allow_missing_keys=allow_missing_keys)
-

 @require_tf
 @require_sentencepiece

@@ -32,7 +32,7 @@ from transformers import (
     is_torch_available,
 )
 from transformers.models.auto import get_values
-from transformers.testing_utils import require_tensorflow_probability, require_torch, slow, torch_device
+from transformers.testing_utils import require_torch, slow, torch_device
 from transformers.utils import cached_property

 from ...test_configuration_common import ConfigTester
@@ -522,11 +522,6 @@ class TapasModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_for_sequence_classification(*config_and_inputs)

-    @require_tensorflow_probability
-    @unittest.skip(reason="tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
-    def test_pt_tf_model_equivalence(self):
-        pass
-
     @unittest.skip(reason="tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
     def test_tf_from_pt_safetensors(self):
         pass

@@ -535,10 +535,6 @@ class TFTapasModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCas
     def test_loss_computation(self):
         pass

-    @unittest.skip("tfp is not defined even if installed. FIXME @Arthur in a followup PR!")
-    def test_pt_tf_model_equivalence(self):
-        pass
-

 def prepare_tapas_single_inputs_for_inference():
     # Here we prepare a single table-question pair to test TAPAS inference on:

@@ -34,7 +34,6 @@ from transformers.models.tapas.tokenization_tapas import (
     _is_whitespace,
 )
 from transformers.testing_utils import (
-    is_pt_tf_cross_test,
     require_pandas,
     require_tensorflow_probability,
     require_tokenizers,
@@ -1158,54 +1157,6 @@ class TapasTokenizationTest(TokenizerTesterMixin, unittest.TestCase):

         self.assertListEqual(encoding.input_ids[:2], expected_results)

-    @is_pt_tf_cross_test
-    def test_batch_encode_plus_tensors(self):
-        tokenizers = self.get_tokenizers(do_lower_case=False)
-        for tokenizer in tokenizers:
-            with self.subTest(f"{tokenizer.__class__.__name__}"):
-                sequences = [
-                    "Testing batch encode plus",
-                    "Testing batch encode plus with different sequence lengths",
-                    "Testing batch encode plus with different sequence lengths correctly pads",
-                ]
-
-                table = self.get_table(tokenizer, length=0)
-
-                # A Tensor cannot be build by sequences which are not the same size
-                self.assertRaises(ValueError, tokenizer.batch_encode_plus, table, sequences, return_tensors="pt")
-                self.assertRaises(ValueError, tokenizer.batch_encode_plus, table, sequences, return_tensors="tf")
-
-                if tokenizer.pad_token_id is None:
-                    self.assertRaises(
-                        ValueError,
-                        tokenizer.batch_encode_plus,
-                        table,
-                        sequences,
-                        padding=True,
-                        return_tensors="pt",
-                    )
-                    self.assertRaises(
-                        ValueError,
-                        tokenizer.batch_encode_plus,
-                        table,
-                        sequences,
-                        padding="longest",
-                        return_tensors="tf",
-                    )
-                else:
-                    pytorch_tensor = tokenizer.batch_encode_plus(table, sequences, padding=True, return_tensors="pt")
-                    tensorflow_tensor = tokenizer.batch_encode_plus(
-                        table, sequences, padding="longest", return_tensors="tf"
-                    )
-                    encoded_sequences = tokenizer.batch_encode_plus(table, sequences, padding=True)
-
-                    for key in encoded_sequences.keys():
-                        pytorch_value = pytorch_tensor[key].tolist()
-                        tensorflow_value = tensorflow_tensor[key].numpy().tolist()
-                        encoded_value = encoded_sequences[key]
-
-                        self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
-
     @slow
     def test_tapas_integration_test(self):
         data = {

@@ -30,7 +30,6 @@ from transformers import (
 )
 from transformers.testing_utils import (
     get_tests_dir,
-    is_pt_tf_cross_test,
     require_pandas,
     require_sentencepiece,
     require_tokenizers,
@@ -1374,54 +1373,6 @@ class UdopTokenizationTest(TokenizerTesterMixin, unittest.TestCase):
         self.assertListEqual(new_encoded_inputs, dropped_encoded_inputs)
         self.assertLessEqual(len(new_encoded_inputs), 20)

-    @is_pt_tf_cross_test
-    def test_batch_encode_plus_tensors(self):
-        tokenizers = self.get_tokenizers(do_lower_case=False)
-        for tokenizer in tokenizers:
-            with self.subTest(f"{tokenizer.__class__.__name__}"):
-                words, boxes = self.get_words_and_boxes_batch()
-
-                # A Tensor cannot be build by sequences which are not the same size
-                self.assertRaises(
-                    ValueError, tokenizer.batch_encode_plus_boxes, words, boxes=boxes, return_tensors="pt"
-                )
-                self.assertRaises(
-                    ValueError, tokenizer.batch_encode_plus_boxes, words, boxes=boxes, return_tensors="tf"
-                )
-
-                if tokenizer.pad_token_id is None:
-                    self.assertRaises(
-                        ValueError,
-                        tokenizer.batch_encode_plus_boxes,
-                        words,
-                        boxes=boxes,
-                        padding=True,
-                        return_tensors="pt",
-                    )
-                    self.assertRaises(
-                        ValueError,
-                        tokenizer.batch_encode_plus_boxes,
-                        words,
-                        boxes=boxes,
-                        padding="longest",
-                        return_tensors="tf",
-                    )
-                else:
-                    pytorch_tensor = tokenizer.batch_encode_plus_boxes(
-                        words, boxes=boxes, padding=True, return_tensors="pt"
-                    )
-                    tensorflow_tensor = tokenizer.batch_encode_plus_boxes(
-                        words, boxes=boxes, padding="longest", return_tensors="tf"
-                    )
-                    encoded_sequences = tokenizer.batch_encode_plus_boxes(words, boxes=boxes, padding=True)
-
-                    for key in encoded_sequences.keys():
-                        pytorch_value = pytorch_tensor[key].tolist()
-                        tensorflow_value = tensorflow_tensor[key].numpy().tolist()
-                        encoded_value = encoded_sequences[key]
-
-                        self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
-
     def test_sequence_ids(self):
         tokenizers = self.get_tokenizers()
         for tokenizer in tokenizers:

@@ -16,23 +16,18 @@

 from __future__ import annotations

-import copy
 import os
 import tempfile
 import unittest

 import numpy as np

-from transformers import is_tf_available, is_torch_available, is_vision_available
+from transformers import is_tf_available, is_vision_available
 from transformers.testing_utils import (
-    is_pt_tf_cross_test,
     require_tf,
-    require_torch,
     require_vision,
     slow,
-    torch_device,
 )
-from transformers.utils.generic import ModelOutput

 from ...test_modeling_tf_common import floats_tensor, ids_tensor
 from ..gpt2.test_modeling_tf_gpt2 import TFGPT2ModelTester
@@ -55,11 +50,6 @@ if is_tf_available():
     )
     from transformers.modeling_tf_outputs import TFBaseModelOutput

-if is_torch_available():
-    import torch
-
-    from transformers import GPT2LMHeadModel, VisionEncoderDecoderModel, ViTModel
-
 if is_vision_available():
     from PIL import Image

@@ -318,185 +308,6 @@ class TFVisionEncoderDecoderMixin:
             tuple(generated_output.shape.as_list()), (pixel_values.shape[0],) + (decoder_config.max_length,)
         )

-    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-5, name="outputs", attributes=None):
-        """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way.
-
-        Args:
-            model_class: The class of the model that is currently testing. For example, `TFBertModel`,
-                TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative
-                error messages.
-            name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc.
-            attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element
-                being a named field in the output.
-        """
-
-        self.assertEqual(type(name), str)
-        if attributes is not None:
-            self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`")
-
-        # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`).
-        if isinstance(tf_outputs, ModelOutput):
-            self.assertTrue(
-                isinstance(pt_outputs, ModelOutput),
-                f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is",
-            )
-
-            tf_keys = [k for k, v in tf_outputs.items() if v is not None]
-            pt_keys = [k for k, v in pt_outputs.items() if v is not None]
-
-            self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch")
-
-            # convert to the case of `tuple`
-            # appending each key to the current (string) `names`
-            attributes = tuple([f"{name}.{k}" for k in tf_keys])
-            self.check_pt_tf_outputs(
-                tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes
-            )
-
-        # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.)
-        elif type(tf_outputs) in [tuple, list]:
-            self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch")
-            self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch")
-
-            if attributes is not None:
-                # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`)
-                self.assertEqual(
-                    len(attributes),
-                    len(tf_outputs),
-                    f"{name}: The tuple `names` should have the same length as `tf_outputs`",
-                )
-            else:
-                # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `names`
-                attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))])
-
-            for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes):
-                self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr)
-
-        elif isinstance(tf_outputs, tf.Tensor):
-            self.assertTrue(
-                isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `tf_outputs` is"
-            )
-
-            tf_outputs = tf_outputs.numpy()
-            pt_outputs = pt_outputs.detach().to("cpu").numpy()
-
-            self.assertEqual(
-                tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch"
-            )
-
-            # deal with NumPy's scalars to make replacing nan values by 0 work.
-            if np.isscalar(tf_outputs):
-                tf_outputs = np.array([tf_outputs])
-                pt_outputs = np.array([pt_outputs])
-
-            tf_nans = np.isnan(tf_outputs)
-            pt_nans = np.isnan(pt_outputs)
-
-            pt_outputs[tf_nans] = 0
-            tf_outputs[tf_nans] = 0
-            pt_outputs[pt_nans] = 0
-            tf_outputs[pt_nans] = 0
-
-            max_diff = np.amax(np.abs(tf_outputs - pt_outputs))
-            self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).")
-        else:
-            raise ValueError(
-                "`tf_outputs` should be an instance of `tf.Tensor`, a `tuple`, or an instance of `tf.Tensor`. Got"
-                f" {type(tf_outputs)} instead."
-            )
-
-    def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict):
-        pt_inputs_dict = {}
-        for name, key in tf_inputs_dict.items():
-            if isinstance(key, bool):
-                pt_inputs_dict[name] = key
-            elif name == "input_values":
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
-            elif name == "pixel_values":
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
-            elif name == "input_features":
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
-            # other general float inputs
-            elif tf_inputs_dict[name].dtype.is_floating:
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
-            else:
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
-
-        return pt_inputs_dict
-
-    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
-        pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict)
-
-        # send pytorch inputs to the correct device
-        pt_inputs_dict = {
-            k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items()
-        }
-
-        # send pytorch model to the correct device
-        pt_model.to(torch_device)
-
-        # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
-        pt_model.eval()
-
-        with torch.no_grad():
-            pt_outputs = pt_model(**pt_inputs_dict)
-            tf_outputs = tf_model(tf_inputs_dict)
-
-        # tf models returned loss is usually a tensor rather than a scalar.
-        # (see `hf_compute_loss`: it uses `keras.losses.Reduction.NONE`)
-        # Change it here to a scalar to match PyTorch models' loss
-        tf_loss = getattr(tf_outputs, "loss", None)
-        if tf_loss is not None:
-            tf_outputs.loss = tf.math.reduce_mean(tf_loss)
-
-        self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model))
-
-    def check_pt_tf_equivalence(self, tf_model, pt_model, tf_inputs_dict):
-        """Wrap `check_pt_tf_models` to further check PT -> TF again"""
-
-        self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
-
-        # PT -> TF
-        with tempfile.TemporaryDirectory() as tmpdirname:
-            pt_model.save_pretrained(tmpdirname)
-            tf_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname)
-
-        self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
-
-    def check_pt_to_tf_equivalence(self, config, decoder_config, tf_inputs_dict):
-        encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config)
-        # Output all for aggressive testing
-        encoder_decoder_config.output_hidden_states = True
-        # All models tested in this file have attentions
-        encoder_decoder_config.output_attentions = True
-
-        pt_model = VisionEncoderDecoderModel(encoder_decoder_config)
-
-        with tempfile.TemporaryDirectory() as tmpdirname:
-            pt_model.save_pretrained(tmpdirname)
-            tf_model = TFVisionEncoderDecoderModel.from_pretrained(tmpdirname)
-
-        self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict)
-
-    def check_tf_to_pt_equivalence(self, config, decoder_config, tf_inputs_dict):
-        encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config)
-        # Output all for aggressive testing
-        encoder_decoder_config.output_hidden_states = True
-        # TODO: A generalizable way to determine this attribute
-        encoder_decoder_config.output_attentions = True
-
-        tf_model = TFVisionEncoderDecoderModel(encoder_decoder_config)
-        # Make sure model is built before saving
-        tf_model(**tf_inputs_dict)
-
-        with tempfile.TemporaryDirectory() as tmpdirname:
-            tf_model.save_pretrained(tmpdirname, safe_serialization=False)
-            pt_model = VisionEncoderDecoderModel.from_pretrained(
-                tmpdirname, from_tf=True, attn_implementation=tf_model.config._attn_implementation
-            )
-
-        self.check_pt_tf_equivalence(tf_model, pt_model, tf_inputs_dict)
-
     def test_encoder_decoder_model(self):
         config_inputs_dict = self.prepare_config_and_inputs()
         self.check_encoder_decoder_model(**config_inputs_dict)
@@ -533,69 +344,6 @@ class TFVisionEncoderDecoderMixin:
         diff = np.abs((a - b)).max()
         self.assertLessEqual(diff, tol, f"Difference between torch and tf is {diff} (>= {tol}).")

-    @is_pt_tf_cross_test
-    def test_pt_tf_model_equivalence(self):
-        config_inputs_dict = self.prepare_config_and_inputs()
-        labels = config_inputs_dict.pop("decoder_token_labels")
-
-        # Keep only common arguments
-        arg_names = [
-            "config",
-            "pixel_values",
-            "decoder_config",
-            "decoder_input_ids",
-            "decoder_attention_mask",
-            "encoder_hidden_states",
-        ]
-        config_inputs_dict = {k: v for k, v in config_inputs_dict.items() if k in arg_names}
-
-        config = config_inputs_dict.pop("config")
-        decoder_config = config_inputs_dict.pop("decoder_config")
-
-        # Output all for aggressive testing
-        config.output_hidden_states = True
-        decoder_config.output_hidden_states = True
-        # All models tested in this file have attentions
-        config.output_attentions = True
-        decoder_config.output_attentions = True
-
-        tf_inputs_dict = config_inputs_dict
-        # `encoder_hidden_states` is not used in model call/forward
-        del tf_inputs_dict["encoder_hidden_states"]
-
-        # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency
-        # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`.
-        for k in ["decoder_attention_mask"]:
-            attention_mask = tf_inputs_dict[k]
-
-            # Make sure no all 0s attention masks - to avoid failure at this moment.
-            # Put `1` at the beginning of sequences to make it still work when combining causal attention masks.
-            # TODO: remove this line once a fix regarding large negative values for attention mask is done.
-            attention_mask = tf.concat(
-                [tf.ones_like(attention_mask[:, :1], dtype=attention_mask.dtype), attention_mask[:, 1:]], axis=-1
-            )
-            tf_inputs_dict[k] = attention_mask
-
-        tf_inputs_dict_with_labels = copy.copy(tf_inputs_dict)
-        tf_inputs_dict_with_labels["labels"] = labels
-
-        self.assertTrue(decoder_config.cross_attention_hidden_size is None)
-
-        # Original test: check without `labels` and without `enc_to_dec_proj` projection
-        self.assertTrue(config.hidden_size == decoder_config.hidden_size)
-        self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict)
-        self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict)
-
-        # check with `labels`
-        self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict_with_labels)
-        self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict_with_labels)
-
-        # check `enc_to_dec_proj` work as expected
-        decoder_config.hidden_size = decoder_config.hidden_size * 2
-        self.assertTrue(config.hidden_size != decoder_config.hidden_size)
-        self.check_pt_to_tf_equivalence(config, decoder_config, tf_inputs_dict)
-        self.check_tf_to_pt_equivalence(config, decoder_config, tf_inputs_dict)
-
     @slow
     def test_real_model_save_load_from_pretrained(self):
         model_2 = self.get_pretrained_model()
@@ -781,56 +529,6 @@ class TFVisionEncoderDecoderModelSaveLoadTests(unittest.TestCase):
            max_diff = np.max(np.abs(logits_2.numpy() - logits_orig.numpy()))
            self.assertAlmostEqual(max_diff, 0.0, places=4)

-    @require_torch
-    @is_pt_tf_cross_test
-    def test_encoder_decoder_save_load_from_encoder_decoder_from_pt(self):
-        config = self.get_encoder_decoder_config_small()
-
-        # create two random ViT/GPT2 models for vit-gpt2 & initialize weights (+cross_attention weights)
-        encoder_pt = ViTModel(config.encoder).to(torch_device).eval()
-        decoder_pt = GPT2LMHeadModel(config.decoder).to(torch_device).eval()
-
-        encoder_decoder_pt = VisionEncoderDecoderModel(encoder=encoder_pt, decoder=decoder_pt).to(torch_device).eval()
-
-        pixel_values = floats_tensor(
-            [
-                13,
-                encoder_pt.config.num_channels,
-                encoder_pt.config.image_size,
-                encoder_pt.config.image_size,
-            ]
-        )
-        decoder_input_ids = ids_tensor([13, 1], decoder_pt.config.vocab_size)
-
-        pt_pixel_values = torch.tensor(pixel_values.numpy(), device=torch_device, dtype=torch.float)
-        pt_decoder_input_ids = torch.tensor(decoder_input_ids.numpy(), device=torch_device, dtype=torch.long)
-
-        logits_pt = encoder_decoder_pt(pixel_values=pt_pixel_values, decoder_input_ids=pt_decoder_input_ids).logits
-
-        # PyTorch => TensorFlow
-        with tempfile.TemporaryDirectory() as tmp_dirname_1, tempfile.TemporaryDirectory() as tmp_dirname_2:
-            encoder_decoder_pt.encoder.save_pretrained(tmp_dirname_1)
-            encoder_decoder_pt.decoder.save_pretrained(tmp_dirname_2)
-            encoder_decoder_tf = TFVisionEncoderDecoderModel.from_encoder_decoder_pretrained(
-                tmp_dirname_1, tmp_dirname_2
-            )
-
-        logits_tf = encoder_decoder_tf(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits
-
-        max_diff = np.max(np.abs(logits_pt.detach().cpu().numpy() - logits_tf.numpy()))
-        self.assertAlmostEqual(max_diff, 0.0, places=3)
-
-        # Make sure `from_pretrained` following `save_pretrained` work and give the same result
-        # (See https://github.com/huggingface/transformers/pull/14016)
-        with tempfile.TemporaryDirectory() as tmp_dirname:
-            encoder_decoder_tf.save_pretrained(tmp_dirname, safe_serialization=False)
-            encoder_decoder_tf = TFVisionEncoderDecoderModel.from_pretrained(tmp_dirname)
-
-            logits_tf_2 = encoder_decoder_tf(pixel_values=pixel_values, decoder_input_ids=decoder_input_ids).logits
-
-            max_diff = np.max(np.abs(logits_tf_2.numpy() - logits_tf.numpy()))
-            self.assertAlmostEqual(max_diff, 0.0, places=3)
-
     @require_vision
     @slow
     def test_encoder_decoder_from_pretrained(self):

@@ -270,22 +270,6 @@ class TFViTMAEModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestCa
         output_for_kw_input = model(**inputs_np, noise=noise)
         self.assert_outputs_same(output_for_dict_input, output_for_kw_input)

-    # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise
-    # to generate masks during test
-    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
-        # make masks reproducible
-        np.random.seed(2)
-
-        num_patches = int((tf_model.config.image_size // tf_model.config.patch_size) ** 2)
-        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
-        tf_noise = tf.constant(noise)
-
-        # Add `noise` argument.
-        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
-        tf_inputs_dict["noise"] = tf_noise
-
-        super().check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
-
     # overwrite from common since TFViTMAEForPretraining has random masking, we need to fix the noise
     # to generate masks during test
     def test_keras_save_load(self):

@@ -204,22 +204,6 @@ class ViTMAEModelTest(ModelTesterMixin, PipelineTesterMixin, unittest.TestCase):
         config_and_inputs = self.model_tester.prepare_config_and_inputs()
         self.model_tester.create_and_check_for_pretraining(*config_and_inputs)

-    # overwrite from common since ViTMAEForPretraining has random masking, we need to fix the noise
-    # to generate masks during test
-    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
-        # make masks reproducible
-        np.random.seed(2)
-
-        num_patches = int((pt_model.config.image_size // pt_model.config.patch_size) ** 2)
-        noise = np.random.uniform(size=(self.model_tester.batch_size, num_patches))
-        pt_noise = torch.from_numpy(noise)
-
-        # Add `noise` argument.
-        # PT inputs will be prepared in `super().check_pt_tf_models()` with this added `noise` argument
-        pt_inputs_dict["noise"] = pt_noise
-
-        super().check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
-
     def test_save_load(self):
         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()

@@ -22,8 +22,6 @@ import glob
 import inspect
 import math
 import multiprocessing
-import os
-import tempfile
 import traceback
 import unittest

@@ -36,7 +34,6 @@ from transformers import Wav2Vec2Config, is_tf_available
 from transformers.testing_utils import (
     CaptureLogger,
     is_flaky,
-    is_pt_tf_cross_test,
     require_librosa,
     require_pyctcdecode,
     require_tf,
@@ -438,62 +435,6 @@ class TFWav2Vec2ModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.Test
         # TODO: (Amy) - check whether skipping CTC model resolves this issue and possible resolutions for CTC
         pass

-    @is_pt_tf_cross_test
-    def test_pt_tf_model_equivalence(self, allow_missing_keys=False):
-        # We override the base test here to skip loss calculation for Wav2Vec2 models because the loss is massive with
-        # the default labels and frequently overflows to inf or exceeds numerical tolerances between TF/PT
-        import torch
-
-        import transformers
-
-        for model_class in self.all_model_classes:
-            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
-
-            # Output all for aggressive testing
-            config.output_hidden_states = True
-            config.output_attentions = self.has_attentions
-
-            # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency
-            # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`.
-            # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it.
-            self._make_attention_mask_non_null(inputs_dict)
-
-            pt_model_class_name = model_class.__name__[2:]  # Skip the "TF" at the beginning
-            pt_model_class = getattr(transformers, pt_model_class_name)
-
-            tf_model = model_class(config)
-            pt_model = pt_model_class(config)
-
-            tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
-
-            # Check we can load pt model in tf and vice-versa with model => model functions
-            tf_model = transformers.load_pytorch_model_in_tf2_model(
-                tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys
-            )
-            pt_model = transformers.load_tf2_model_in_pytorch_model(
-                pt_model, tf_model, allow_missing_keys=allow_missing_keys
-            )
-
-            # Original test: check without `labels`
-            self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
-
-            # Check we can load pt model in tf and vice-versa with checkpoint => model functions
-            with tempfile.TemporaryDirectory() as tmpdirname:
-                pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
-                torch.save(pt_model.state_dict(), pt_checkpoint_path)
-                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(
-                    tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys
-                )
-
-                tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
-                tf_model.save_weights(tf_checkpoint_path)
-                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(
-                    pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys
-                )
-
-            # Original test: check without `labels`
-            self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
-

 @require_tf
 class TFWav2Vec2RobustModelTest(TFModelTesterMixin, unittest.TestCase):
@@ -623,62 +564,6 @@ class TFWav2Vec2RobustModelTest(TFModelTesterMixin, unittest.TestCase):
         # TODO: (Amy) - check whether skipping CTC model resolves this issue and possible resolutions for CTC
         pass

-    @is_pt_tf_cross_test
-    def test_pt_tf_model_equivalence(self, allow_missing_keys=False):
-        # We override the base test here to skip loss calculation for Wav2Vec2 models because the loss is massive with
-        # the default labels and frequently overflows to inf or exceeds numerical tolerances between TF/PT
-        import torch
-
-        import transformers
-
-        for model_class in self.all_model_classes:
-            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
-
-            # Output all for aggressive testing
-            config.output_hidden_states = True
-            config.output_attentions = self.has_attentions
-
-            # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency
-            # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`.
-            # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it.
-            self._make_attention_mask_non_null(inputs_dict)
-
-            pt_model_class_name = model_class.__name__[2:]  # Skip the "TF" at the beginning
-            pt_model_class = getattr(transformers, pt_model_class_name)
-
-            tf_model = model_class(config)
-            pt_model = pt_model_class(config)
-
-            tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
-
-            # Check we can load pt model in tf and vice-versa with model => model functions
-            tf_model = transformers.load_pytorch_model_in_tf2_model(
-                tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys
-            )
-            pt_model = transformers.load_tf2_model_in_pytorch_model(
-                pt_model, tf_model, allow_missing_keys=allow_missing_keys
-            )
-
-            # Original test: check without `labels`
-            self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
-
-            # Check we can load pt model in tf and vice-versa with checkpoint => model functions
-            with tempfile.TemporaryDirectory() as tmpdirname:
-                pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
-                torch.save(pt_model.state_dict(), pt_checkpoint_path)
-                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(
-                    tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys
-                )
-
-                tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
-                tf_model.save_weights(tf_checkpoint_path)
-                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(
-                    pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys
-                )
-
-            # Original test: check without `labels`
-            self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
-

 @require_tf
 class TFWav2Vec2UtilsTest(unittest.TestCase):

@@ -433,10 +433,6 @@ class TFWhisperModelTest(TFModelTesterMixin, PipelineTesterMixin, unittest.TestC

         check_hidden_states_output(inputs_dict, config, model_class)

-    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None):
-        # We override with a slightly higher tol value, as test recently became flaky
-        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes)
-
     def test_attention_outputs(self):
         config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
         config.return_dict = True

@@ -1069,10 +1069,6 @@ class WhisperModelTest(ModelTesterMixin, GenerationTesterMixin, PipelineTesterMi

         self.assertTrue(models_equal)

-    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None):
-        # We override with a slightly higher tol value, as test recently became flaky
-        super().check_pt_tf_outputs(tf_outputs, pt_outputs, model_class, tol, name, attributes)
-
     def check_pt_flax_outputs(self, fx_outputs, pt_outputs, model_class, tol=5e-5, name="outputs", attributes=None):
         # We override with a slightly higher tol value, as test recently became flaky
         super().check_pt_flax_outputs(fx_outputs, pt_outputs, model_class, tol, name, attributes)

@@ -76,7 +76,6 @@ from transformers.testing_utils import (
     CaptureLogger,
     is_flaky,
     is_pt_flax_cross_test,
-    is_pt_tf_cross_test,
     require_accelerate,
     require_bitsandbytes,
     require_deepspeed,
@@ -129,7 +128,7 @@ if is_torch_available():


 if is_tf_available():
-    import tensorflow as tf
+    pass

 if is_flax_available():
     import jax.numpy as jnp
@@ -2549,236 +2548,6 @@ class ModelTesterMixin:

         return new_tf_outputs, new_pt_outputs

-    # Copied from tests.test_modeling_tf_common.TFModelTesterMixin.check_pt_tf_outputs
-    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None):
-        """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way.
-
-        Args:
-            model_class: The class of the model that is currently testing. For example, `TFBertModel`,
-                TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative
-                error messages.
-            name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc.
-            attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element
-                being a named field in the output.
-        """
-
-        self.assertEqual(type(name), str)
-        if attributes is not None:
-            self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`")
-
-        # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`).
-        if isinstance(tf_outputs, ModelOutput):
-            self.assertTrue(
-                isinstance(pt_outputs, ModelOutput),
-                f"{name}: `pt_outputs` should an instance of `ModelOutput` when `tf_outputs` is",
-            )
-
-            # Don't copy this block to model specific test file!
-            # TODO: remove this method and this line after issues are fixed
-            tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class)
-
-            tf_keys = [k for k, v in tf_outputs.items() if v is not None]
-            pt_keys = [k for k, v in pt_outputs.items() if v is not None]
-
-            self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch")
-
-            # convert to the case of `tuple`
-            # appending each key to the current (string) `name`
-            attributes = tuple([f"{name}.{k}" for k in tf_keys])
-            self.check_pt_tf_outputs(
-                tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes
-            )
-
-        # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.)
-        elif type(tf_outputs) in [tuple, list]:
-            self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch")
-            self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch")
-
-            if attributes is not None:
-                # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`)
-                self.assertEqual(
-                    len(attributes),
-                    len(tf_outputs),
-                    f"{name}: The tuple `attributes` should have the same length as `tf_outputs`",
-                )
-            else:
-                # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `name`
-                attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))])
-
-            for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes):
-                if isinstance(pt_output, DynamicCache):
-                    pt_output = pt_output.to_legacy_cache()
-                self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr)
-
-        elif isinstance(tf_outputs, tf.Tensor):
-            self.assertTrue(
-                isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should a tensor when `tf_outputs` is"
-            )
-
-            tf_outputs = tf_outputs.numpy()
-            pt_outputs = pt_outputs.detach().to("cpu").numpy()
-
-            self.assertEqual(
-                tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch"
-            )
-
-            # deal with NumPy's scalars to make replacing nan values by 0 work.
-            if np.isscalar(tf_outputs):
-                tf_outputs = np.array([tf_outputs])
-                pt_outputs = np.array([pt_outputs])
-
-            tf_nans = np.isnan(tf_outputs)
-            pt_nans = np.isnan(pt_outputs)
-
-            pt_outputs[tf_nans] = 0
-            tf_outputs[tf_nans] = 0
-            pt_outputs[pt_nans] = 0
-            tf_outputs[pt_nans] = 0
-
-            max_diff = np.amax(np.abs(tf_outputs - pt_outputs))
-            self.assertLessEqual(
-                max_diff,
-                tol,
-                f"{name}: Difference between PyTorch and TF is {max_diff} (>= {tol}) for {model_class.__name__}",
-            )
-        else:
-            raise ValueError(
-                "`tf_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `tf.Tensor`. Got"
-                f" {type(tf_outputs)} instead."
-            )
-
-    def prepare_tf_inputs_from_pt_inputs(self, pt_inputs_dict):
-        tf_inputs_dict = {}
-        for key, tensor in pt_inputs_dict.items():
-            # skip key that does not exist in tf
-            if isinstance(tensor, bool):
-                tf_inputs_dict[key] = tensor
-            elif key == "input_values":
-                tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32)
-            elif key == "pixel_values":
-                tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32)
-            elif key == "input_features":
-                tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32)
-            # other general float inputs
-            elif tensor.is_floating_point():
-                tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.float32)
-            else:
-                tf_inputs_dict[key] = tf.convert_to_tensor(tensor.cpu().numpy(), dtype=tf.int32)
-
-        return tf_inputs_dict
-
-    def check_pt_tf_models(self, tf_model, pt_model, pt_inputs_dict):
-        tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict)
-
-        # send pytorch inputs to the correct device
-        pt_inputs_dict = {
-            k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items()
-        }
-
-        # send pytorch model to the correct device
-        pt_model.to(torch_device)
-
-        # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
-        pt_model.eval()
-
-        with torch.no_grad():
-            pt_outputs = pt_model(**pt_inputs_dict)
-            tf_outputs = tf_model(tf_inputs_dict)
-
-        # tf models returned loss is usually a tensor rather than a scalar.
-        # (see `hf_compute_loss`: it uses `tf.keras.losses.Reduction.NONE`)
-        # Change it here to a scalar to match PyTorch models' loss
-        tf_loss = getattr(tf_outputs, "loss", None)
-        if tf_loss is not None:
-            tf_outputs.loss = tf.math.reduce_mean(tf_loss)
-
-        self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(pt_model))
-
-    @is_pt_tf_cross_test
-    def test_pt_tf_model_equivalence(self, allow_missing_keys=False):
-        import transformers
-
-        for model_class in self.all_model_classes:
-            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
-
-            tf_model_class_name = "TF" + model_class.__name__  # Add the "TF" at the beginning
-            if not hasattr(transformers, tf_model_class_name):
-                self.skipTest(reason="transformers does not have TF version of this model yet")
-
-            # Output all for aggressive testing
-            config.output_hidden_states = True
-            config.output_attentions = self.has_attentions
-
-            # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency
-            # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`.
-            # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it.
-            self._make_attention_mask_non_null(inputs_dict)
-
-            tf_model_class = getattr(transformers, tf_model_class_name)
-
-            pt_model = model_class(config).eval()
-            tf_model = tf_model_class(config)
-
-            pt_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
-            pt_inputs_dict_with_labels = self._prepare_for_class(
-                inputs_dict,
-                model_class,
-                # Not all models accept "labels" in the forward pass (yet :) )
-                return_labels=True if "labels" in inspect.signature(model_class.forward).parameters.keys() else False,
-            )
-
-            # make sure only tf inputs are forward that actually exist in function args
-            tf_input_keys = set(inspect.signature(tf_model.call).parameters.keys())
-
-            # remove all head masks
-            tf_input_keys.discard("head_mask")
-            tf_input_keys.discard("cross_attn_head_mask")
-            tf_input_keys.discard("decoder_head_mask")
-
-            pt_inputs_dict = {k: v for k, v in pt_inputs_dict.items() if k in tf_input_keys}
-            pt_inputs_dict_with_labels = {k: v for k, v in pt_inputs_dict_with_labels.items() if k in tf_input_keys}
-
-            # For some models (e.g. base models), there is no label returned.
-            # Set the input dict to `None` to avoid check outputs twice for the same input dicts.
-            if not set(pt_inputs_dict_with_labels.keys()).symmetric_difference(pt_inputs_dict.keys()):
-                pt_inputs_dict_with_labels = None
-
-            # Check we can load pt model in tf and vice-versa with model => model functions
-            # Here requires `tf_inputs_dict` to build `tf_model`
-            tf_inputs_dict = self.prepare_tf_inputs_from_pt_inputs(pt_inputs_dict)
-            tf_model = transformers.load_pytorch_model_in_tf2_model(
-                tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys
-            )
-            pt_model = transformers.load_tf2_model_in_pytorch_model(
-                pt_model, tf_model, allow_missing_keys=allow_missing_keys
-            )
-
-            # Original test: check without `labels`
-            self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
-            # check with `labels`
-            if pt_inputs_dict_with_labels:
-                self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels)
-
-            # Check we can load pt model in tf and vice-versa with checkpoint => model functions
-            with tempfile.TemporaryDirectory() as tmpdirname:
-                pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
-                torch.save(pt_model.state_dict(), pt_checkpoint_path)
-                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(
-                    tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys
-                )
-
-                tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
-                tf_model.save_weights(tf_checkpoint_path)
-                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(
-                    pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys
-                )
-
-            # Original test: check without `labels`
-            self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict)
-            # check with `labels`
-            if pt_inputs_dict_with_labels:
-                self.check_pt_tf_models(tf_model, pt_model, pt_inputs_dict_with_labels)
-
     def assert_almost_equals(self, a: np.ndarray, b: np.ndarray, tol: float):
         diff = np.abs((a - b)).max()
         self.assertLessEqual(diff, tol, f"Difference between torch and flax is {diff} (>= {tol}).")
@@ -4644,30 +4413,6 @@ class ModelTesterMixin:
         tol = torch.finfo(torch.float16).eps
         torch.testing.assert_close(logits_padded, logits_padfree, rtol=tol, atol=tol)

-    @is_pt_tf_cross_test
-    def test_tf_from_pt_safetensors(self):
-        for model_class in self.all_model_classes:
-            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
-
-            tf_model_class_name = "TF" + model_class.__name__  # Add the "TF" at the beginning
-            if not hasattr(transformers, tf_model_class_name):
-                self.skipTest(reason="transformers does not have this model in TF version yet")
-
-            tf_model_class = getattr(transformers, tf_model_class_name)
-
-            pt_model = model_class(config)
-
-            with tempfile.TemporaryDirectory() as tmpdirname:
-                pt_model.save_pretrained(tmpdirname, safe_serialization=True)
-                tf_model_1 = tf_model_class.from_pretrained(tmpdirname, from_pt=True)
-
-                pt_model.save_pretrained(tmpdirname, safe_serialization=False)
-                tf_model_2 = tf_model_class.from_pretrained(tmpdirname, from_pt=True)
-
-                # Check models are equal
-                for p1, p2 in zip(tf_model_1.weights, tf_model_2.weights):
-                    self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
-
     @is_pt_flax_cross_test
     def test_flax_from_pt_safetensors(self):
         for model_class in self.all_model_classes:

@@ -29,12 +29,11 @@ from typing import List, Tuple
 
 from datasets import Dataset
 
-from transformers import is_tf_available, is_torch_available
+from transformers import is_tf_available
 from transformers.models.auto import get_values
 from transformers.testing_utils import (  # noqa: F401
     CaptureLogger,
     _tf_gpu_memory_limit,
-    is_pt_tf_cross_test,
     require_tf,
     require_tf2onnx,
     slow,
@@ -88,9 +87,6 @@ if is_tf_available():
                 # Virtual devices must be set before GPUs have been initialized
                 print(e)
 
-if is_torch_available():
-    import torch
-
 
 def _config_zero_init(config):
     configs_no_init = copy.deepcopy(config)
@@ -474,215 +470,6 @@ class TFModelTesterMixin:
 
         return new_tf_outputs, new_pt_outputs
 
-    def check_pt_tf_outputs(self, tf_outputs, pt_outputs, model_class, tol=1e-4, name="outputs", attributes=None):
-        """Check the outputs from PyTorch and TensorFlow models are close enough. Checks are done in a recursive way.
-
-        Args:
-            model_class: The class of the model that is currently testing. For example, `TFBertModel`,
-                `TFBertForMaskedLM`, `TFBertForSequenceClassification`, etc. Mainly used for providing more informative
-                error messages.
-            name (`str`): The name of the output. For example, `output.hidden_states`, `output.attentions`, etc.
-            attributes (`Tuple[str]`): The names of the output's element if the output is a tuple/list with each element
-                being a named field in the output.
-        """
-        from transformers.cache_utils import DynamicCache
-
-        self.assertEqual(type(name), str)
-        if attributes is not None:
-            self.assertEqual(type(attributes), tuple, f"{name}: The argument `attributes` should be a `tuple`")
-
-        # Allow `ModelOutput` (e.g. `CLIPOutput` has `text_model_output` and `vision_model_output`).
-        if isinstance(tf_outputs, ModelOutput):
-            self.assertTrue(
-                isinstance(pt_outputs, ModelOutput),
-                f"{name}: `pt_outputs` should be an instance of `ModelOutput` when `tf_outputs` is",
-            )
-
-            # Don't copy this block to model specific test file!
-            # TODO: remove this method and this line after issues are fixed
-            tf_outputs, pt_outputs = self._postprocessing_to_ignore_test_cases(tf_outputs, pt_outputs, model_class)
-
-            tf_keys = [k for k, v in tf_outputs.items() if v is not None]
-            pt_keys = [k for k, v in pt_outputs.items() if v is not None]
-
-            self.assertEqual(tf_keys, pt_keys, f"{name}: Output keys differ between TF and PyTorch")
-
-            # convert to the case of `tuple`
-            # appending each key to the current (string) `names`
-            attributes = tuple([f"{name}.{k}" for k in tf_keys])
-            self.check_pt_tf_outputs(
-                tf_outputs.to_tuple(), pt_outputs.to_tuple(), model_class, tol=tol, name=name, attributes=attributes
-            )
-
-        # Allow `list` (e.g. `TransfoXLModelOutput.mems` is a list of tensors.)
-        elif type(tf_outputs) in [tuple, list]:
-            self.assertEqual(type(tf_outputs), type(pt_outputs), f"{name}: Output types differ between TF and PyTorch")
-            self.assertEqual(len(tf_outputs), len(pt_outputs), f"{name}: Output lengths differ between TF and PyTorch")
-
-            if attributes is not None:
-                # case 1: each output has assigned name (e.g. a tuple form of a `ModelOutput`)
-                self.assertEqual(
-                    len(attributes),
-                    len(tf_outputs),
-                    f"{name}: The tuple `names` should have the same length as `tf_outputs`",
-                )
-            else:
-                # case 2: each output has no assigned name (e.g. hidden states of each layer) -> add an index to `names`
-                attributes = tuple([f"{name}_{idx}" for idx in range(len(tf_outputs))])
-
-            for tf_output, pt_output, attr in zip(tf_outputs, pt_outputs, attributes):
-                if isinstance(pt_output, DynamicCache):
-                    pt_output = pt_output.to_legacy_cache()
-                self.check_pt_tf_outputs(tf_output, pt_output, model_class, tol=tol, name=attr)
-
-        elif isinstance(tf_outputs, tf.Tensor):
-            self.assertTrue(
-                isinstance(pt_outputs, torch.Tensor), f"{name}: `pt_outputs` should be a tensor when `tf_outputs` is"
-            )
-
-            tf_outputs = tf_outputs.numpy()
-            pt_outputs = pt_outputs.detach().to("cpu").numpy()
-
-            self.assertEqual(
-                tf_outputs.shape, pt_outputs.shape, f"{name}: Output shapes differ between TF and PyTorch"
-            )
-
-            # deal with NumPy's scalars to make replacing nan values by 0 work.
-            if np.isscalar(tf_outputs):
-                tf_outputs = np.array([tf_outputs])
-                pt_outputs = np.array([pt_outputs])
-
-            tf_nans = np.isnan(tf_outputs)
-            pt_nans = np.isnan(pt_outputs)
-
-            pt_outputs[tf_nans] = 0
-            tf_outputs[tf_nans] = 0
-            pt_outputs[pt_nans] = 0
-            tf_outputs[pt_nans] = 0
-
-            max_diff = np.amax(np.abs(tf_outputs - pt_outputs))
-            self.assertLessEqual(max_diff, tol, f"{name}: Difference between torch and tf is {max_diff} (>= {tol}).")
-        else:
-            raise ValueError(
-                "`tf_outputs` should be an instance of `ModelOutput`, a `tuple`, or an instance of `tf.Tensor`. Got"
-                f" {type(tf_outputs)} instead."
-            )
-
-    def prepare_pt_inputs_from_tf_inputs(self, tf_inputs_dict):
-        pt_inputs_dict = {}
-        for name, key in tf_inputs_dict.items():
-            if isinstance(key, bool):
-                pt_inputs_dict[name] = key
-            elif name == "input_values":
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
-            elif name == "pixel_values":
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
-            elif name == "input_features":
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
-            # other general float inputs
-            elif tf_inputs_dict[name].dtype.is_floating:
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.float32)
-            else:
-                pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
-
-        return pt_inputs_dict
-
-    def check_pt_tf_models(self, tf_model, pt_model, tf_inputs_dict):
-        pt_inputs_dict = self.prepare_pt_inputs_from_tf_inputs(tf_inputs_dict)
-
-        # send pytorch inputs to the correct device
-        pt_inputs_dict = {
-            k: v.to(device=torch_device) if isinstance(v, torch.Tensor) else v for k, v in pt_inputs_dict.items()
-        }
-
-        # send pytorch model to the correct device
-        pt_model.to(torch_device)
-
-        # Check predictions on first output (logits/hidden-states) are close enough given low-level computational differences
-        pt_model.eval()
-
-        with torch.no_grad():
-            pt_outputs = pt_model(**pt_inputs_dict)
-            tf_outputs = tf_model(tf_inputs_dict)
-
-        # tf models returned loss is usually a tensor rather than a scalar.
-        # (see `hf_compute_loss`: it uses `keras.losses.Reduction.NONE`)
-        # Change it here to a scalar to match PyTorch models' loss
-        tf_loss = getattr(tf_outputs, "loss", None)
-        if tf_loss is not None:
-            tf_outputs.loss = tf.math.reduce_mean(tf_loss)
-
-        self.check_pt_tf_outputs(tf_outputs, pt_outputs, type(tf_model))
-
-    @is_pt_tf_cross_test
-    def test_pt_tf_model_equivalence(self, allow_missing_keys=False):
-        import transformers
-
-        for model_class in self.all_model_classes:
-            config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
-
-            # Output all for aggressive testing
-            config.output_hidden_states = True
-            config.output_attentions = self.has_attentions
-
-            # Make sure no sequence has all zeros as attention mask, otherwise some tests fail due to the inconsistency
-            # of the usage `1e-4`, `1e-9`, `1e-30`, `-inf`.
-            # TODO: Use a uniform value for all models, make sure all tests pass without this processing, and remove it.
-            self._make_attention_mask_non_null(inputs_dict)
-
-            pt_model_class_name = model_class.__name__[2:]  # Skip the "TF" at the beginning
-            pt_model_class = getattr(transformers, pt_model_class_name)
-
-            tf_model = model_class(config)
-            pt_model = pt_model_class(config)
-
-            tf_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
-            tf_inputs_dict_with_labels = self._prepare_for_class(
-                inputs_dict,
-                model_class,
-                # Not all models accept "labels" in the forward pass (yet :) )
-                return_labels=True if "labels" in inspect.signature(model_class.call).parameters.keys() else False,
-            )
-
-            # For some models (e.g. base models), there is no label returned.
-            # Set the input dict to `None` to avoid check outputs twice for the same input dicts.
-            if not set(tf_inputs_dict_with_labels.keys()).symmetric_difference(tf_inputs_dict.keys()):
-                tf_inputs_dict_with_labels = None
-
-            # Check we can load pt model in tf and vice-versa with model => model functions
-            tf_model = transformers.load_pytorch_model_in_tf2_model(
-                tf_model, pt_model, tf_inputs=tf_inputs_dict, allow_missing_keys=allow_missing_keys
-            )
-            pt_model = transformers.load_tf2_model_in_pytorch_model(
-                pt_model, tf_model, allow_missing_keys=allow_missing_keys
-            )
-
-            # Original test: check without `labels`
-            self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
-            # check with `labels`
-            if tf_inputs_dict_with_labels:
-                self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels)
-
-            # Check we can load pt model in tf and vice-versa with checkpoint => model functions
-            with tempfile.TemporaryDirectory() as tmpdirname:
-                pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
-                torch.save(pt_model.state_dict(), pt_checkpoint_path)
-                tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(
-                    tf_model, pt_checkpoint_path, allow_missing_keys=allow_missing_keys
-                )
-
-                tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
-                tf_model.save_weights(tf_checkpoint_path)
-                pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(
-                    pt_model, tf_checkpoint_path, allow_missing_keys=allow_missing_keys
-                )
-
-            # Original test: check without `labels`
-            self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict)
-            # check with `labels`
-            if tf_inputs_dict_with_labels:
-                self.check_pt_tf_models(tf_model, pt_model, tf_inputs_dict_with_labels)
-
     @slow
     def test_compile_tf_model(self):
         config, _ = self.model_tester.prepare_config_and_inputs_for_common()
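For readers who want to sanity-check PT/TF parity outside the test suite, the numeric core of the removed `check_pt_tf_outputs` helper reduces to a NaN-tolerant maximum-absolute-difference check. Below is a minimal, self-contained sketch of that check; the array values and the 1e-4 tolerance are illustrative only, and `max_abs_diff` is a hypothetical helper name, not part of the library.

    import numpy as np

    def max_abs_diff(tf_out, pt_out):
        # As in the removed helper: zero out NaNs that appear in either output,
        # then take the largest absolute elementwise difference.
        tf_out = np.asarray(tf_out, dtype=np.float64).copy()
        pt_out = np.asarray(pt_out, dtype=np.float64).copy()
        nans = np.isnan(tf_out) | np.isnan(pt_out)
        tf_out[nans] = 0
        pt_out[nans] = 0
        return float(np.amax(np.abs(tf_out - pt_out)))

    # Illustrative "framework" outputs for the same inputs.
    tf_logits = np.array([[0.12340, 0.56780], [np.nan, 1.0]])
    pt_logits = np.array([[0.12341, 0.56779], [np.nan, 1.0]])
    print(max_abs_diff(tf_logits, pt_logits) <= 1e-4)  # True for these values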
@@ -49,7 +49,6 @@ from transformers import (
 from transformers.testing_utils import (
     check_json_file_has_correct_format,
     get_tests_dir,
-    is_pt_tf_cross_test,
     require_jinja,
     require_read_token,
     require_tf,
@@ -2971,48 +2970,6 @@ class TokenizerTesterMixin:
                 string_sequences, return_overflowing_tokens=True, truncation=True, padding=True, max_length=3
             )
 
-    @is_pt_tf_cross_test
-    def test_batch_encode_plus_tensors(self):
-        tokenizers = self.get_tokenizers(do_lower_case=False)
-        for tokenizer in tokenizers:
-            with self.subTest(f"{tokenizer.__class__.__name__}"):
-                sequences = [
-                    "Testing batch encode plus",
-                    "Testing batch encode plus with different sequence lengths",
-                    "Testing batch encode plus with different sequence lengths correctly pads",
-                ]
-
-                # A Tensor cannot be built from sequences which are not the same size
-                self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, return_tensors="pt")
-                self.assertRaises(ValueError, tokenizer.batch_encode_plus, sequences, return_tensors="tf")
-
-                if tokenizer.pad_token_id is None:
-                    self.assertRaises(
-                        ValueError,
-                        tokenizer.batch_encode_plus,
-                        sequences,
-                        padding=True,
-                        return_tensors="pt",
-                    )
-                    self.assertRaises(
-                        ValueError,
-                        tokenizer.batch_encode_plus,
-                        sequences,
-                        padding="longest",
-                        return_tensors="tf",
-                    )
-                else:
-                    pytorch_tensor = tokenizer.batch_encode_plus(sequences, padding=True, return_tensors="pt")
-                    tensorflow_tensor = tokenizer.batch_encode_plus(sequences, padding="longest", return_tensors="tf")
-                    encoded_sequences = tokenizer.batch_encode_plus(sequences, padding=True)
-
-                    for key in encoded_sequences.keys():
-                        pytorch_value = pytorch_tensor[key].tolist()
-                        tensorflow_value = tensorflow_tensor[key].numpy().tolist()
-                        encoded_value = encoded_sequences[key]
-
-                        self.assertEqual(pytorch_value, tensorflow_value, encoded_value)
-
     def _check_no_pad_token_padding(self, tokenizer, sequences):
         # if tokenizer does not have pad_token_id, an error should be thrown
         if tokenizer.pad_token_id is None:
@@ -736,55 +736,6 @@ NEW_BERT_CONSTANT = "value"
         self.assertIsNone(bert_model_patterns.feature_extractor_class)
         self.assertIsNone(bert_model_patterns.processor_class)
 
-    def test_retrieve_info_for_model_pt_tf_with_bert(self):
-        bert_info = retrieve_info_for_model("bert", frameworks=["pt", "tf"])
-        bert_classes = [
-            "BertForTokenClassification",
-            "BertForQuestionAnswering",
-            "BertForNextSentencePrediction",
-            "BertForSequenceClassification",
-            "BertForMaskedLM",
-            "BertForMultipleChoice",
-            "BertModel",
-            "BertForPreTraining",
-            "BertLMHeadModel",
-        ]
-        expected_model_classes = {"pt": set(bert_classes), "tf": {f"TF{m}" for m in bert_classes}}
-
-        self.assertEqual(set(bert_info["frameworks"]), {"pt", "tf"})
-        model_classes = {k: set(v) for k, v in bert_info["model_classes"].items()}
-        self.assertEqual(model_classes, expected_model_classes)
-
-        all_bert_files = bert_info["model_files"]
-        model_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_bert_files["model_files"]}
-        bert_model_files = BERT_MODEL_FILES - {"src/transformers/models/bert/modeling_flax_bert.py"}
-        self.assertEqual(model_files, bert_model_files)
-
-        test_files = {str(Path(f).relative_to(REPO_PATH)) for f in all_bert_files["test_files"]}
-        bert_test_files = {
-            "tests/models/bert/test_tokenization_bert.py",
-            "tests/models/bert/test_modeling_bert.py",
-            "tests/models/bert/test_modeling_tf_bert.py",
-        }
-        self.assertEqual(test_files, bert_test_files)
-
-        doc_file = str(Path(all_bert_files["doc_file"]).relative_to(REPO_PATH))
-        self.assertEqual(doc_file, "docs/source/en/model_doc/bert.md")
-
-        self.assertEqual(all_bert_files["module_name"], "bert")
-
-        bert_model_patterns = bert_info["model_patterns"]
-        self.assertEqual(bert_model_patterns.model_name, "BERT")
-        self.assertEqual(bert_model_patterns.checkpoint, "google-bert/bert-base-uncased")
-        self.assertEqual(bert_model_patterns.model_type, "bert")
-        self.assertEqual(bert_model_patterns.model_lower_cased, "bert")
-        self.assertEqual(bert_model_patterns.model_camel_cased, "Bert")
-        self.assertEqual(bert_model_patterns.model_upper_cased, "BERT")
-        self.assertEqual(bert_model_patterns.config_class, "BertConfig")
-        self.assertEqual(bert_model_patterns.tokenizer_class, "BertTokenizer")
-        self.assertIsNone(bert_model_patterns.feature_extractor_class)
-        self.assertIsNone(bert_model_patterns.processor_class)
-
     def test_retrieve_info_for_model_with_vit(self):
         vit_info = retrieve_info_for_model("vit")
         vit_classes = ["ViTForImageClassification", "ViTModel"]
@@ -16,7 +16,6 @@
 
 from __future__ import annotations
 
-import inspect
 import json
 import os
 import random
@@ -24,10 +23,10 @@ import tempfile
 import unittest
 import unittest.mock as mock
 
-from huggingface_hub import HfFolder, Repository, snapshot_download
+from huggingface_hub import HfFolder, snapshot_download
 from requests.exceptions import HTTPError
 
-from transformers import is_tf_available, is_torch_available
+from transformers import is_tf_available
 from transformers.configuration_utils import PretrainedConfig
 from transformers.testing_utils import (  # noqa: F401
     TOKEN,
@@ -35,11 +34,9 @@ from transformers.testing_utils import (  # noqa: F401
     CaptureLogger,
     TemporaryHubRepo,
     _tf_gpu_memory_limit,
-    is_pt_tf_cross_test,
     is_staging_test,
     require_safetensors,
     require_tf,
-    require_torch,
     slow,
 )
 from transformers.utils import (
@@ -61,14 +58,9 @@ if is_tf_available():
 
     from transformers import (
         BertConfig,
-        PreTrainedModel,
-        PushToHubCallback,
         RagRetriever,
-        TFAutoModel,
-        TFBertForMaskedLM,
         TFBertForSequenceClassification,
         TFBertModel,
-        TFPreTrainedModel,
         TFRagModel,
     )
     from transformers.modeling_tf_utils import keras, tf_shard_checkpoint, unpack_inputs
@@ -90,9 +82,6 @@ if is_tf_available():
                 # Virtual devices must be set before GPUs have been initialized
                 print(e)
 
-if is_torch_available():
-    from transformers import BertModel
-
 
 @require_tf
 class TFModelUtilsTest(unittest.TestCase):
@@ -241,34 +230,6 @@ class TFModelUtilsTest(unittest.TestCase):
         # If this doesn't throw an error then the test passes
         TFBertForSequenceClassification.from_pretrained("ArthurZ/tiny-random-bert-sharded")
 
-    @is_pt_tf_cross_test
-    def test_checkpoint_sharding_local_from_pt(self):
-        with tempfile.TemporaryDirectory() as tmp_dir:
-            _ = Repository(local_dir=tmp_dir, clone_from="hf-internal-testing/tiny-random-bert-sharded")
-            model = TFBertModel.from_pretrained(tmp_dir, from_pt=True)
-            # the model above is the same as the model below, just a sharded pytorch version.
-            ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
-            for p1, p2 in zip(model.weights, ref_model.weights):
-                assert np.allclose(p1.numpy(), p2.numpy())
-
-    @is_pt_tf_cross_test
-    def test_checkpoint_loading_with_prefix_from_pt(self):
-        model = TFBertModel.from_pretrained(
-            "hf-internal-testing/tiny-random-bert", from_pt=True, load_weight_prefix="a/b"
-        )
-        ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert", from_pt=True)
-        for p1, p2 in zip(model.weights, ref_model.weights):
-            self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
-            self.assertTrue(p1.name.startswith("a/b/"))
-
-    @is_pt_tf_cross_test
-    def test_checkpoint_sharding_hub_from_pt(self):
-        model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert-sharded", from_pt=True)
-        # the model above is the same as the model below, just a sharded pytorch version.
-        ref_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
-        for p1, p2 in zip(model.weights, ref_model.weights):
-            assert np.allclose(p1.numpy(), p2.numpy())
-
     def test_shard_checkpoint(self):
         # This is the model we will use, total size 340,000 bytes.
         model = keras.Sequential(
@@ -437,16 +398,6 @@ class TFModelUtilsTest(unittest.TestCase):
         for p1, p2 in zip(model.weights, new_model.weights):
             self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
 
-    @is_pt_tf_cross_test
-    @require_safetensors
-    def test_bfloat16_torch_loading(self):
-        # Assert that neither of these raise an error - both repos contain bfloat16 tensors
-        model1 = TFAutoModel.from_pretrained("Rocketknight1/tiny-random-gpt2-bfloat16-pt", from_pt=True)
-        model2 = TFAutoModel.from_pretrained("Rocketknight1/tiny-random-gpt2-bfloat16")  # PT-format safetensors
-        # Check that PT and safetensors loading paths end up with the same values
-        for weight1, weight2 in zip(model1.weights, model2.weights):
-            self.assertTrue(tf.reduce_all(weight1 == weight2))
-
     @slow
     def test_save_pretrained_signatures(self):
         model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
@@ -522,36 +473,6 @@ class TFModelUtilsTest(unittest.TestCase):
         for p1, p2 in zip(model.weights, new_model.weights):
             self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
 
-    @is_pt_tf_cross_test
-    def test_safetensors_save_and_load_pt_to_tf(self):
-        model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
-        pt_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
-        with tempfile.TemporaryDirectory() as tmp_dir:
-            pt_model.save_pretrained(tmp_dir, safe_serialization=True)
-            # Check we have a model.safetensors file
-            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_NAME)))
-
-            new_model = TFBertModel.from_pretrained(tmp_dir)
-
-            # Check models are equal
-            for p1, p2 in zip(model.weights, new_model.weights):
-                self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
-
-    @is_pt_tf_cross_test
-    def test_sharded_safetensors_save_and_load_pt_to_tf(self):
-        model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
-        pt_model = BertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
-        with tempfile.TemporaryDirectory() as tmp_dir:
-            pt_model.save_pretrained(tmp_dir, safe_serialization=True, max_shard_size="150kB")
-            # Check we have a safetensors shard index file
-            self.assertTrue(os.path.isfile(os.path.join(tmp_dir, SAFE_WEIGHTS_INDEX_NAME)))
-
-            new_model = TFBertModel.from_pretrained(tmp_dir)
-
-            # Check models are equal
-            for p1, p2 in zip(model.weights, new_model.weights):
-                self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
-
     @require_safetensors
     def test_safetensors_load_from_hub(self):
         tf_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-random-bert")
@@ -581,19 +502,6 @@ class TFModelUtilsTest(unittest.TestCase):
         for p1, p2 in zip(model.weights, new_model.weights):
             self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
 
-    @require_safetensors
-    @is_pt_tf_cross_test
-    def test_safetensors_tf_from_torch(self):
-        hub_model = TFBertModel.from_pretrained("hf-internal-testing/tiny-bert-tf-only")
-        model = BertModel.from_pretrained("hf-internal-testing/tiny-bert-pt-only")
-
-        with tempfile.TemporaryDirectory() as tmp_dir:
-            model.save_pretrained(tmp_dir, safe_serialization=True)
-            new_model = TFBertModel.from_pretrained(tmp_dir)
-
-            for p1, p2 in zip(hub_model.weights, new_model.weights):
-                self.assertTrue(np.allclose(p1.numpy(), p2.numpy()))
-
     @require_safetensors
     def test_safetensors_tf_from_sharded_h5_with_sharded_safetensors_local(self):
         with tempfile.TemporaryDirectory() as tmp_dir:
@@ -729,37 +637,6 @@ class TFModelPushToHubTester(unittest.TestCase):
                 break
         self.assertTrue(models_equal)
 
-    @is_pt_tf_cross_test
-    def test_push_to_hub_callback(self):
-        with TemporaryHubRepo(token=self._token) as tmp_repo:
-            config = BertConfig(
-                vocab_size=99, hidden_size=32, num_hidden_layers=5, num_attention_heads=4, intermediate_size=37
-            )
-            model = TFBertForMaskedLM(config)
-            model.compile()
-
-            with tempfile.TemporaryDirectory() as tmp_dir:
-                push_to_hub_callback = PushToHubCallback(
-                    output_dir=tmp_dir,
-                    hub_model_id=tmp_repo.repo_id,
-                    hub_token=self._token,
-                )
-                model.fit(model.dummy_inputs, model.dummy_inputs, epochs=1, callbacks=[push_to_hub_callback])
-
-            new_model = TFBertForMaskedLM.from_pretrained(tmp_repo.repo_id)
-            models_equal = True
-            for p1, p2 in zip(model.weights, new_model.weights):
-                if not tf.math.reduce_all(p1 == p2):
-                    models_equal = False
-                    break
-            self.assertTrue(models_equal)
-
-            tf_push_to_hub_params = dict(inspect.signature(TFPreTrainedModel.push_to_hub).parameters)
-            tf_push_to_hub_params.pop("base_model_card_args")
-            pt_push_to_hub_params = dict(inspect.signature(PreTrainedModel.push_to_hub).parameters)
-            pt_push_to_hub_params.pop("deprecated_kwargs")
-            self.assertDictEqual(tf_push_to_hub_params, pt_push_to_hub_params)
-
     def test_push_to_hub_in_organization(self):
         with TemporaryHubRepo(namespace="valid_org", token=self._token) as tmp_repo:
             config = BertConfig(
@@ -1148,7 +1148,6 @@ def parse_commit_message(commit_message: str) -> Dict[str, bool]:
 
 
 JOB_TO_TEST_FILE = {
-    "tests_torch_and_tf": r"tests/models/.*/test_modeling_(?:tf_|(?!flax)).*",
     "tests_torch_and_flax": r"tests/models/.*/test_modeling_(?:flax|(?!tf)).*",
     "tests_tf": r"tests/models/.*/test_modeling_tf_.*",
     "tests_torch": r"tests/models/.*/test_modeling_(?!(?:flax_|tf_)).*",
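As a quick illustration of how the remaining `JOB_TO_TEST_FILE` patterns route model test files once the `tests_torch_and_tf` entry is gone, here is a small sketch; the patterns are copied from the mapping above, while the file names and the use of `re.fullmatch` are illustrative assumptions rather than the fetcher's actual matching code.

    import re

    tests_torch = r"tests/models/.*/test_modeling_(?!(?:flax_|tf_)).*"
    tests_tf = r"tests/models/.*/test_modeling_tf_.*"

    # The negative lookahead keeps TF- and Flax-specific files out of the plain torch job.
    print(bool(re.fullmatch(tests_torch, "tests/models/bert/test_modeling_bert.py")))     # True
    print(bool(re.fullmatch(tests_torch, "tests/models/bert/test_modeling_tf_bert.py")))  # False
    print(bool(re.fullmatch(tests_tf, "tests/models/bert/test_modeling_tf_bert.py")))     # True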