deprecate no_cuda (#24863)

* deprecate no_cuda
* style
* remove doc
* remove doc 2
* fix style

parent 0f4502d335
commit 9dc965bb40
src/transformers/training_args.py
@@ -297,8 +297,8 @@ class TrainingArguments:
             This should not be activated when the different nodes use the same storage as the files will be saved with
             the same names for each node.
-        no_cuda (`bool`, *optional*, defaults to `False`):
-            Whether to not use CUDA even when it is available or not.
+        use_cpu (`bool`, *optional*, defaults to `False`):
+            Whether or not to use cpu. If set to False, we will use cuda or mps device if available.
         seed (`int`, *optional*, defaults to 42):
             Random seed that will be set at the beginning of training. To ensure reproducibility across runs, use the
             [`~Trainer.model_init`] function to instantiate the model if it has some randomly initialized parameters.
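For callers, the docstring change above boils down to a one-flag migration. A minimal usage sketch (the `output_dir` value is illustrative; everything else is the public `TrainingArguments` API):

    from transformers import TrainingArguments

    # Deprecated spelling: still accepted, but emits a FutureWarning as of this commit.
    args = TrainingArguments(output_dir="out", no_cuda=True)

    # Preferred spelling: the replacement flag introduced here.
    args = TrainingArguments(output_dir="out", use_cpu=True)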
@@ -313,7 +313,7 @@ class TrainingArguments:
             installation](https://github.com/intel/intel-extension-for-pytorch).
         bf16 (`bool`, *optional*, defaults to `False`):
             Whether to use bf16 16-bit (mixed) precision training instead of 32-bit training. Requires Ampere or higher
-            NVIDIA architecture or using CPU (no_cuda). This is an experimental API and it may change.
+            NVIDIA architecture or using CPU (use_cpu). This is an experimental API and it may change.
         fp16 (`bool`, *optional*, defaults to `False`):
             Whether to use fp16 16-bit (mixed) precision training instead of 32-bit training.
         fp16_opt_level (`str`, *optional*, defaults to 'O1'):
@@ -793,7 +793,14 @@ class TrainingArguments:
             )
         },
     )
-    no_cuda: bool = field(default=False, metadata={"help": "Do not use CUDA even when it is available"})
+    no_cuda: bool = field(
+        default=False,
+        metadata={"help": "This argument is deprecated. It will be removed in version 5.0 of 🤗 Transformers."},
+    )
+    use_cpu: bool = field(
+        default=False,
+        metadata={"help": " Whether or not to use cpu. If set to False, we will use cuda or mps device if available."},
+    )
     use_mps_device: bool = field(
         default=False,
         metadata={
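The field definition above keeps `no_cuda` alive so that existing scripts and `HfArgumentParser` command lines keep parsing, while its help text now only announces the deprecation. A stripped-down sketch of the same keep-old-add-new pattern (class and field names here are illustrative, not from the commit):

    from dataclasses import dataclass, field

    @dataclass
    class ExampleArguments:
        # Deprecated flag, retained only so old invocations still parse.
        no_cuda: bool = field(
            default=False,
            metadata={"help": "Deprecated. Use `use_cpu` instead."},
        )
        # Replacement flag that callers should migrate to.
        use_cpu: bool = field(
            default=False,
            metadata={"help": "Run on CPU even if an accelerator is available."},
        )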
@@ -820,7 +827,7 @@ class TrainingArguments:
         metadata={
             "help": (
                 "Whether to use bf16 (mixed) precision instead of 32-bit. Requires Ampere or higher NVIDIA"
-                " architecture or using CPU (no_cuda). This is an experimental API and it may change."
+                " architecture or using CPU (use_cpu). This is an experimental API and it may change."
             )
         },
     )
@@ -1211,6 +1218,13 @@ class TrainingArguments:
             )
             # Go back to the underlying string or we won't be able to instantiate `IntervalStrategy` on it.
             self.evaluation_strategy = self.evaluation_strategy.value
+        if self.no_cuda:
+            warnings.warn(
+                "using `no_cuda` is deprecated and will be removed in version 5.0 of 🤗 Transformers. "
+                "Use `use_cpu` instead",
+                FutureWarning,
+            )
+            self.use_cpu = self.no_cuda

         self.evaluation_strategy = IntervalStrategy(self.evaluation_strategy)
         self.logging_strategy = IntervalStrategy(self.logging_strategy)
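Because the shim runs in `__post_init__`, the warning fires as soon as the arguments object is constructed, and the deprecated value is mirrored onto `use_cpu`. A minimal sketch of how that behavior could be checked (assuming a writable `out` directory):

    import warnings

    from transformers import TrainingArguments

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        args = TrainingArguments(output_dir="out", no_cuda=True)

    assert args.use_cpu  # the deprecated flag is copied onto the new one
    assert any(issubclass(w.category, FutureWarning) for w in caught)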
@@ -1305,10 +1319,10 @@ class TrainingArguments:
             self.half_precision_backend = self.fp16_backend

         if self.bf16 or self.bf16_full_eval:
-            if self.no_cuda and not is_torch_bf16_cpu_available() and not is_torch_tpu_available():
+            if self.use_cpu and not is_torch_bf16_cpu_available() and not is_torch_tpu_available():
                 # cpu
                 raise ValueError("Your setup doesn't support bf16/(cpu, tpu, neuroncore). You need torch>=1.10")
-            elif not self.no_cuda and torch.cuda.is_available() and not is_torch_bf16_gpu_available():
+            elif not self.use_cpu and torch.cuda.is_available() and not is_torch_bf16_gpu_available():
                 # gpu
                 raise ValueError(
                     "Your setup doesn't support bf16/gpu. You need torch>=1.10, using Ampere GPU with cuda>=11.0"
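The bf16 validation now keys off `use_cpu`, but the checks themselves are unchanged. The GPU branch corresponds roughly to the standalone test below, sketched with public `torch` calls instead of the library-internal `is_torch_bf16_gpu_available` helper:

    import torch

    def bf16_gpu_supported() -> bool:
        # bf16 on GPU needs CUDA plus an Ampere-or-newer card with a bf16-capable toolkit.
        return torch.cuda.is_available() and torch.cuda.is_bf16_supported()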
@@ -1702,7 +1716,7 @@ class TrainingArguments:
             )
             AcceleratorState._reset_state(reset_partial_state=True)
         self.distributed_state = None
-        if self.no_cuda:
+        if self.use_cpu:
             self.distributed_state = PartialState(cpu=True, backend=self.ddp_backend)
             self._n_gpu = 0
         elif is_sagemaker_mp_enabled():
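With `use_cpu` set, the distributed state is pinned to CPU through Accelerate. The same call in isolation, as a minimal sketch (assuming `accelerate` is installed; omitting `backend` uses Accelerate's default):

    from accelerate import PartialState

    state = PartialState(cpu=True)  # forces the process onto CPU even if GPUs are visible
    print(state.device)             # device(type='cpu')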
@@ -1752,7 +1766,7 @@ class TrainingArguments:
             )
         if device.type == "mps":
             self._n_gpu = 1
-        elif self.no_cuda:
+        elif self.use_cpu:
             device = torch.device("cpu")
             self._n_gpu = 0
         else:
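Reading the hunk above together with the earlier device setup, the fallback order is: MPS if the resolved device is already MPS, then the `use_cpu` override, then the remaining CUDA branches. A simplified, illustrative sketch of that resolution (the hypothetical `resolve_device` compresses logic the real method spreads across SageMaker, TPU, and distributed branches not shown here):

    import torch

    def resolve_device(use_cpu: bool) -> torch.device:
        # Simplification: the real code resolves MPS before consulting use_cpu.
        if torch.backends.mps.is_available() and not use_cpu:
            return torch.device("mps")
        if use_cpu or not torch.cuda.is_available():
            return torch.device("cpu")
        return torch.device("cuda:0")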