Fix E722 flake8 warnings (x26).

Aymeric Augustin 2019-12-21 20:22:05 +01:00
parent b0f7db73cd
commit 631be27078
20 changed files with 43 additions and 41 deletions
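For context: flake8's E722 flags bare "except:" clauses, which catch everything, including KeyboardInterrupt and SystemExit, and tend to hide unrelated bugs. Each change below replaces a bare except with the narrowest exception the surrounding code actually expects. A minimal sketch of the idea, using an illustrative helper that is not part of this commit:

import json

def read_settings(path):
    # Hypothetical helper, for illustration only.
    # A bare "except:" here would also swallow KeyboardInterrupt, SystemExit
    # and genuine bugs such as a NameError inside the try block.
    try:
        with open(path) as f:
            return json.load(f)
    except FileNotFoundError:
        # The only failure this helper is prepared to handle.
        return {}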

View File

@@ -44,7 +44,7 @@ from transformers import (
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter
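This try/except fallback repeats in every example script below: torch.utils.tensorboard is preferred when the installed PyTorch provides it, and tensorboardX is used otherwise. Naming ImportError keeps the fallback limited to a genuinely missing module; any other failure now surfaces instead of being silently masked. The pattern in isolation (a sketch, matching the module names in the diff):

try:
    # PyTorch 1.1+ bundles a SummaryWriter implementation.
    from torch.utils.tensorboard import SummaryWriter
except ImportError:
    # Fall back to the standalone tensorboardX package only when the
    # torch-provided module is absent, not on arbitrary errors.
    from tensorboardX import SummaryWriter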

View File

@@ -37,7 +37,7 @@ from utils import logger
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter

View File

@@ -67,7 +67,7 @@ from ..utils_squad_evaluate import main as evaluate_on_squad
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter

View File

@@ -62,7 +62,7 @@ from utils_mmimdb import ImageEncoder, JsonlDataset, collate_fn, get_image_trans
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter

View File

@@ -697,8 +697,8 @@ def run_pplm_example(
             print("= Perturbed generated text {} =".format(i + 1))
             print(pert_gen_text)
             print()
-        except:
-            pass
+        except Exception as exc:
+            print("Ignoring error while generating perturbed text:", exc)

         # keep the prefix, perturbed seq, original seq for each index
         generated_texts.append((tokenized_cond_text, pert_gen_tok_text, unpert_gen_tok_text))
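The PPLM example previously paired the bare except with pass, so a sample whose decoding or printing failed disappeared without a trace. The new code names Exception and reports what was skipped. A standalone sketch of that report-and-continue loop; the decode argument is a hypothetical stand-in for the detokenization done in run_pplm.py:

def print_perturbed(samples, decode):
    # decode: any callable that may raise on a malformed sample.
    for i, sample in enumerate(samples):
        try:
            text = decode(sample)
            print("= Perturbed generated text {} =".format(i + 1))
            print(text)
            print()
        except Exception as exc:
            # Report and move on rather than silently dropping the sample.
            print("Ignoring error while generating perturbed text:", exc)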

View File

@@ -285,7 +285,7 @@ def train_discriminator(
             for i, line in enumerate(f):
                 try:
                     data.append(eval(line))
-                except:
+                except Exception:
                     print("Error evaluating line {}: {}".format(i, line))
                     continue
         x = []
@@ -303,7 +303,7 @@ def train_discriminator(
                    continue
                x.append(seq)
                y.append(d["label"])
-            except:
+            except Exception:
                print("Error evaluating / tokenizing" " line {}, skipping it".format(i))
                pass
@@ -343,7 +343,7 @@ def train_discriminator(
                    continue
                x.append(seq)
                y.append(int(np.sum(d["label"]) > 0))
-            except:
+            except Exception:
                print("Error evaluating / tokenizing" " line {}, skipping it".format(i))
                pass
@@ -402,7 +402,7 @@ def train_discriminator(
                    x.append(seq)
                    y.append(class2idx[label])
-                except:
+                except Exception:
                    print("Error tokenizing line {}, skipping it".format(i))
                    pass
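All four discriminator-training hunks follow the same load-and-skip pattern: each dataset line is parsed or tokenized inside a try block, and a malformed line is reported and skipped instead of aborting the run. Catching Exception (rather than nothing at all) is deliberate here, since many different parse errors are possible. A self-contained sketch of the pattern; ast.literal_eval stands in for the script's eval call and is an assumption, not part of the diff:

import ast

def load_records(lines):
    records = []
    for i, line in enumerate(lines):
        try:
            records.append(ast.literal_eval(line))
        except Exception:
            # Any parse error just skips the offending line with a message.
            print("Error evaluating line {}: {}".format(i, line))
            continue
    return records

# The second line is malformed and is skipped with a message.
print(load_records(['{"text": "fine", "label": 1}', "{not valid"]))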

View File

@@ -64,7 +64,7 @@ from transformers import glue_processors as processors
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter

View File

@@ -63,7 +63,7 @@ from transformers import (
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter

View File

@@ -48,7 +48,7 @@ from utils_multiple_choice import convert_examples_to_features, processors
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter

View File

@@ -64,7 +64,7 @@ from transformers.data.processors.squad import SquadResult, SquadV1Processor, Sq
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter

View File

@@ -52,7 +52,7 @@ from transformers import xnli_processors as processors
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter

View File

@@ -63,7 +63,7 @@ from utils_squad_evaluate import main as evaluate_on_squad
 try:
     from torch.utils.tensorboard import SummaryWriter
-except:
+except ImportError:
     from tensorboardX import SummaryWriter

View File

@@ -6,12 +6,12 @@ __version__ = "2.3.0"
 # and: https://github.com/tensorflow/tensorflow/issues/26691#issuecomment-500369493
 try:
     import absl.logging
+except ImportError:
+    pass
+else:
     absl.logging.set_verbosity("info")
     absl.logging.set_stderrthreshold("info")
     absl.logging._warn_preinit_stderr = False
-except:
-    pass
 import logging
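Beyond narrowing to ImportError, this hunk moves the absl configuration calls into an else block, which runs only when the import succeeded. That separates "absl is not installed" (ignored) from "absl is installed but misbehaving" (now raised). The resulting shape, as it reads after the change:

try:
    import absl.logging
except ImportError:
    # absl is optional; do nothing when it is absent.
    pass
else:
    # Runs only after a successful import, so errors here propagate.
    absl.logging.set_verbosity("info")
    absl.logging.set_stderrthreshold("info")
    absl.logging._warn_preinit_stderr = False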

View File

@@ -205,10 +205,8 @@ class HfFolder:
         try:
             with open(cls.path_token, "r") as f:
                 return f.read()
-        except:
-            # this is too wide. When Py2 is dead use:
-            # `except FileNotFoundError:` instead
-            return None
+        except FileNotFoundError:
+            pass

     @classmethod
     def delete_token(cls):
@@ -218,5 +216,5 @@ class HfFolder:
         """
         try:
             os.remove(cls.path_token)
-        except:
-            return
+        except FileNotFoundError:
+            pass
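The old HfFolder code even carried a comment apologizing for the overly wide except; with Python 2 support gone, FileNotFoundError can be named directly, and a missing token file is the only condition these helpers need to tolerate. A minimal sketch of the same read/delete behaviour against an illustrative path (the real class uses cls.path_token):

import os

TOKEN_PATH = os.path.expanduser("~/.example_token")  # illustrative, not the real location

def get_token():
    try:
        with open(TOKEN_PATH, "r") as f:
            return f.read()
    except FileNotFoundError:
        # No token saved yet; callers treat None as "not logged in".
        return None

def delete_token():
    try:
        os.remove(TOKEN_PATH)
    except FileNotFoundError:
        # Deleting a token that was never saved is not an error.
        pass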

View File

@@ -439,7 +439,7 @@ class PreTrainedModel(nn.Module):
         if state_dict is None and not from_tf:
             try:
                 state_dict = torch.load(resolved_archive_file, map_location="cpu")
-            except:
+            except Exception:
                 raise OSError(
                     "Unable to load weights from pytorch checkpoint file. "
                     "If you tried to load a PyTorch model from a TF 2.0 checkpoint, please set from_tf=True. "

View File

@@ -333,13 +333,13 @@ class TFCommonTestCases:
                # We used to fall back to just synthetically creating a dummy tensor of ones:
                try:
                    x = wte(input_ids, mode="embedding")
-               except:
+               except Exception:
                    try:
                        x = wte([input_ids], mode="embedding")
-                   except:
+                   except Exception:
                        try:
                            x = wte([input_ids, None, None, None], mode="embedding")
-                       except:
+                       except Exception:
                            if hasattr(self.model_tester, "embedding_size"):
                                x = tf.ones(input_ids.shape + [self.model_tester.embedding_size], dtype=tf.dtypes.float32)
                            else:

View File

@@ -168,11 +168,12 @@ class CTRLTokenizer(PreTrainedTokenizer):
             while i < len(word):
                 try:
                     j = word.index(first, i)
-                    new_word.extend(word[i:j])
-                    i = j
-                except:
+                except ValueError:
                     new_word.extend(word[i:])
                     break
+                else:
+                    new_word.extend(word[i:j])
+                    i = j

                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                     new_word.append(first + second)
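The four tokenizer hunks (CTRL, GPT-2, OpenAI GPT and XLM) restructure the BPE merge loop the same way: list.index raises ValueError when the first symbol of the bigram does not occur again, so the not-found branch moves into except ValueError and the found branch into else. A small standalone sketch of that control flow on a plain list of symbols, not the tokenizers' full bpe method:

def merge_bigram(word, first, second):
    """Merge every adjacent (first, second) pair in a list of symbols."""
    new_word = []
    i = 0
    while i < len(word):
        try:
            j = word.index(first, i)
        except ValueError:
            # "first" does not occur again: copy the tail and stop.
            new_word.extend(word[i:])
            break
        else:
            # Copy everything up to the next occurrence of "first".
            new_word.extend(word[i:j])
            i = j
        if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
            new_word.append(first + second)
            i += 2
        else:
            new_word.append(word[i])
            i += 1
    return new_word

# "l" followed by "o" becomes a single merged symbol.
print(merge_bigram(["h", "e", "l", "l", "o"], "l", "o"))  # ['h', 'e', 'l', 'lo']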

View File

@@ -178,11 +178,12 @@ class GPT2Tokenizer(PreTrainedTokenizer):
             while i < len(word):
                 try:
                     j = word.index(first, i)
-                    new_word.extend(word[i:j])
-                    i = j
-                except:
+                except ValueError:
                     new_word.extend(word[i:])
                     break
+                else:
+                    new_word.extend(word[i:j])
+                    i = j

                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                     new_word.append(first + second)

View File

@@ -136,11 +136,12 @@ class OpenAIGPTTokenizer(PreTrainedTokenizer):
             while i < len(word):
                 try:
                     j = word.index(first, i)
-                    new_word.extend(word[i:j])
-                    i = j
-                except:
+                except ValueError:
                     new_word.extend(word[i:])
                     break
+                else:
+                    new_word.extend(word[i:j])
+                    i = j

                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                     new_word.append(first + second)

View File

@@ -683,11 +683,12 @@ class XLMTokenizer(PreTrainedTokenizer):
             while i < len(word):
                 try:
                     j = word.index(first, i)
-                    new_word.extend(word[i:j])
-                    i = j
-                except:
+                except ValueError:
                     new_word.extend(word[i:])
                     break
+                else:
+                    new_word.extend(word[i:j])
+                    i = j

                 if word[i] == first and i < len(word) - 1 and word[i + 1] == second:
                     new_word.append(first + second)