diff --git a/.github/workflows/self-push.yml b/.github/workflows/self-push.yml
index 5ac040b9202..9f0f2d06a58 100644
--- a/.github/workflows/self-push.yml
+++ b/.github/workflows/self-push.yml
@@ -51,7 +51,8 @@ jobs:
       - name: Are GPUs recognized by our DL frameworks
        run: |
          source .env/bin/activate
-         python -c "import torch; print(torch.cuda.is_available())"
+         python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
+         python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"

      - name: Run all non-slow tests on GPU
        env:
diff --git a/.github/workflows/self-scheduled.yml b/.github/workflows/self-scheduled.yml
index e70be8cd090..d6c89a22ac9 100644
--- a/.github/workflows/self-scheduled.yml
+++ b/.github/workflows/self-scheduled.yml
@@ -48,7 +48,9 @@ jobs:
       - name: Are GPUs recognized by our DL frameworks
        run: |
          source .env/bin/activate
-         python -c "import torch; print(torch.cuda.is_available())"
+         python -c "import torch; print('Cuda available:', torch.cuda.is_available())"
+         python -c "import torch; print('Number of GPUs available:', torch.cuda.device_count())"
+

      - name: Run all tests on GPU
        env:
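
For reference, a minimal standalone sketch of the GPU visibility check these workflow steps now perform (assuming a local PyTorch install; the workflows run the same calls as inline `python -c` one-liners inside the activated `.env` virtualenv):

    import torch

    # Same checks the CI steps run, with labels so the CI logs are easier to read.
    print("Cuda available:", torch.cuda.is_available())
    print("Number of GPUs available:", torch.cuda.device_count())

Printing the device count alongside availability helps distinguish a runner where CUDA is missing entirely from one where fewer GPUs are exposed than expected.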