def test_get_device(mock_check_output, mock_is_available):
    """get_device picks the freest GPU and degrades to 'cpu' on failure."""
    # CUDA reported available and nvidia-smi output parsable:
    # the freest GPU (index 1 in the fixture) must be chosen.
    mock_is_available.return_value = True
    mock_check_output.return_value = mock_nvidia_smi_output()
    assert get_device() == 'cuda:1'

    # nvidia-smi blowing up must not propagate — fall back to CPU.
    mock_check_output.side_effect = subprocess.SubprocessError
    assert get_device() == 'cpu'

    # No CUDA at all: CPU regardless of nvidia-smi output.
    mock_is_available.return_value = False
    mock_check_output.return_value = ''
    assert get_device() == 'cpu'
def evaluate_dataset(self, test_root, test_csv_file, model=None, batch_size=8):
    """Evaluate a model on a CSV-described test dataset.

    Args:
        test_root: Root directory holding the test samples.
        test_csv_file: CSV file listing the test samples.
        model: Model to evaluate; when None, the best model found by the
            searcher is loaded and used.
        batch_size: DataLoader batch size. Was a hard-coded TODO constant
            (8); now a parameter with the same default.

    Returns:
        The value of `self.metric` computed over the whole test set.
    """
    test_dataset = MyData(csv_file=test_csv_file, root=test_root, test=True)
    test_data = torch.utils.data.DataLoader(test_dataset,
                                            batch_size=batch_size,
                                            shuffle=False,
                                            pin_memory=True)
    if model is None:
        model = self.load_searcher().load_best_model().produce_model()
    device = get_device()
    model.to(device)
    model.eval()

    all_targets = []
    all_predicted = []
    with torch.no_grad():
        for inputs, targets in test_data:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            all_predicted.append(outputs.cpu().numpy())
            all_targets.append(targets.cpu().numpy())
    # Single np.concatenate over the list is linear; the previous
    # reduce(lambda x, y: np.concatenate((x, y)), ...) re-copied the
    # growing array on every step (quadratic).
    all_predicted = np.concatenate(all_predicted)
    all_targets = np.concatenate(all_targets)
    return self.metric.compute(all_predicted, all_targets)
def __init__(self):
    """Load the three MTCNN stage networks (PNet/RNet/ONet) onto the device.

    The original body repeated the same load/move/eval sequence three
    times; it is factored into a local helper.
    """
    super(FaceDetector, self).__init__()
    pnet, rnet, onet = self.load()
    self.device = get_device()

    def _prepare(net, weights_path):
        # Without CUDA, remap the checkpoint's storages to CPU so
        # GPU-trained weights still load on CPU-only machines.
        if torch.cuda.is_available():
            net.load_state_dict(torch.load(weights_path))
        else:
            net.load_state_dict(torch.load(weights_path,
                                           map_location=lambda storage, loc: storage))
        net = net.to(self.device)
        net.eval()
        return net

    self.pnet_detector = _prepare(PNet(), pnet)
    self.rnet_detector = _prepare(RNet(), rnet)
    self.onet_detector = _prepare(ONet(), onet)

    # MTCNN detection hyper-parameters.
    self.min_face_size = 24
    self.stride = 2
    self.threshold = [0.6, 0.7, 0.7]
    self.scale_factor = 0.709
def __init__(self, model_path=None, overwrite=False):
    """Set up voice-generator paths and state, then load its weights."""
    super(VoiceGenerator, self).__init__()
    if model_path is None:
        model_path = temp_path_generator()
    self.model_path = model_path
    ensure_dir(self.model_path)
    self.checkpoint_path = os.path.join(
        self.model_path, Constant.PRE_TRAIN_VOICE_GENERATOR_MODEL_NAME)
    # Audio parameters; filled in when the model/config is loaded.
    self.sample_rate = 0
    self.hop_length = 0
    self.overwrite = overwrite
    self.device = get_device()
    self.load()
def __init__(self, verbose, **kwargs):
    """Initialize shared BERT classifier state (tokenizer, device, labels)."""
    super().__init__(verbose=verbose, **kwargs)
    # BERT variant used for both the tokenizer and the model download.
    self.bert_model = 'bert-base-uncased'
    self.tokenizer = BertTokenizer.from_pretrained(self.bert_model,
                                                   do_lower_case=True)
    self.device = get_device()
    # Resolved once the label set of the training data is known.
    self.num_labels = None
def load(self):
    """Download the pretrained sentiment model and move it to the device."""
    self.device = get_device()
    self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased',
                                                   do_lower_case=True)
    weights_file = os.path.join(tempfile.gettempdir(),
                                'text_sentiment_pytorch_model.bin')
    download_file_from_google_drive(TEXT_SENTIMENT_FILE_ID, weights_file)
    # Remap storages to CPU so loading also works without CUDA.
    state = torch.load(weights_file,
                       map_location=lambda storage, loc: storage)
    self.model = BertForSequenceClassification.from_pretrained(
        'bert-base-uncased', state_dict=state)
    self.model.to(self.device)
def __init__(self, loss_function, train_data, test_data=None, metric=None,
             verbose=False, device=None):
    """Store common trainer configuration.

    Args:
        loss_function: Callable computing the training loss.
        train_data: DataLoader (or iterable) of training batches.
        test_data: Optional DataLoader of evaluation batches.
        metric: Optional metric used for evaluation.
        verbose: Whether to print training progress.
        device: Optional explicit device string. When None the device is
            auto-detected, preserving the previous behavior; the new
            keyword matches the sibling trainer constructors.
    """
    self.device = device if device is not None else get_device()
    self.metric = metric
    self.verbose = verbose
    self.loss_function = loss_function
    self.train_loader = train_data
    self.test_loader = test_data
def load(self):
    """Fetch the pretrained classifier weights and build the model."""
    self.device = get_device()
    self.tokenizer = BertTokenizer.from_pretrained('bert-base-uncased',
                                                   do_lower_case=True)
    weights_file = os.path.join(tempfile.gettempdir(), self.model_dir)
    download_file_from_google_drive(self.file_id, weights_file)
    # map_location keeps CPU-only machines working with GPU checkpoints.
    state = torch.load(weights_file,
                       map_location=lambda storage, loc: storage)
    self.model = BertForSequenceClassification.from_pretrained(
        'bert-base-uncased',
        state_dict=state,
        num_labels=self.num_classes)
    self.model.to(self.device)
def generate(self, input_sample=None):
    """Run the generator and save the output as an image grid.

    Args:
        input_sample: Optional latent input as a torch.Tensor or
            numpy.ndarray; random noise is drawn when omitted.

    Raises:
        TypeError: If `input_sample` is neither a Tensor nor an ndarray.
    """
    if input_sample is None:
        # Default: a batch of latent vectors sized per the configured output.
        input_sample = torch.randn(self.gen_training_result[1],
                                   self.nz, 1, 1, device=get_device())
    elif isinstance(input_sample, np.ndarray):
        input_sample = torch.from_numpy(input_sample)
    elif not isinstance(input_sample, torch.Tensor):
        raise TypeError("Input should be a torch.tensor or a numpy.ndarray")
    self.net_g.eval()
    with torch.no_grad():
        input_sample = input_sample.to(get_device())
        fake = self.net_g(input_sample)
        vutils.save_image(fake.detach(),
                          '%s/evaluation.png' % self.gen_training_result[0],
                          normalize=True)
def __init__(self, loss_function, train_data, test_data=None, metric=None,
             verbose=False, device=None):
    """Store trainer configuration.

    Args:
        loss_function: Callable computing the training loss.
        train_data: DataLoader (or iterable) of training batches.
        test_data: Optional DataLoader of evaluation batches.
        metric: Optional metric used for evaluation.
        verbose: Whether to print training progress.
        device: Optional explicit device; auto-detected when None.
    """
    # `is not None` instead of truthiness so an explicit falsy-but-valid
    # device object is respected; also matches the sibling trainer __init__.
    if device is not None:
        self.device = device
    else:
        self.device = get_device()
    self.metric = metric
    self.verbose = verbose
    self.loss_function = loss_function
    self.train_loader = train_data
    self.test_loader = test_data
    # Optional wall-clock limit; set by callers before training.
    self._timeout = None
def evaluate(self, test_data):
    """Evaluate the performance of the best architecture in terms of the loss.

    Args:
        test_data: A DataLoader instance representing the testing data.
    """
    model = self.best_model.produce_model()
    model.eval()
    device = get_device()
    targets, predictions = [], []
    with torch.no_grad():
        for sample, label in test_data:
            sample, label = sample.to(device), label.to(device)
            predictions.append(model(sample))
            targets.append(label)
    return self.metric().compute(predictions, targets)
def __init__(self, verbose=True, model_path=None):
    """Initialize the instance, downloading any missing pretrained files."""
    self.verbose = verbose
    self.model = None
    self.device = get_device()
    if model_path is None:
        model_path = temp_path_generator()
    self.model_path = model_path
    ensure_dir(self.model_path)
    # Local destination for every Google-Drive-hosted file.
    self.local_paths = [os.path.join(self.model_path, item.local_name)
                        for item in self._google_drive_files]
    for dest, item in zip(self.local_paths, self._google_drive_files):
        if not os.path.exists(dest):
            download_file_from_google_drive(file_id=item.google_drive_id,
                                            dest_path=dest,
                                            verbose=True)
def __init__(self, loss_function, train_data, test_data=None, metric=None,
             verbose=False, device=None):
    """Store trainer configuration; auto-detect the device when not given."""
    self.device = device if device is not None else get_device()
    self.metric = metric
    self.verbose = verbose
    self.loss_function = loss_function
    self.train_loader = train_data
    self.test_loader = test_data
    # Optional wall-clock limit; set by callers before training.
    self._timeout = None
def __init__(self, loss_function, train_data, test_data=None, metric=None,
             verbose=False, device=None):
    """Store trainer configuration.

    Args:
        loss_function: Callable computing the training loss.
        train_data: DataLoader (or iterable) of training batches.
        test_data: Optional DataLoader of evaluation batches.
        metric: Optional metric used for evaluation.
        verbose: Whether to print training progress.
        device: Optional explicit device; auto-detected when None.
    """
    if device is not None:
        self.device = device
    else:
        self.device = get_device()
    # Only qualify a bare 'cuda' with the current device index. The old
    # code appended unconditionally, producing invalid strings such as
    # 'cuda:1:0' (get_device() may already return an indexed device) and
    # crashing on CPU-only machines where torch.cuda.current_device()
    # cannot be called.
    if self.device == 'cuda':
        self.device = self.device + ":" + str(torch.cuda.current_device())
    self.metric = metric
    self.verbose = verbose
    self.loss_function = loss_function
    self.train_loader = train_data
    self.test_loader = test_data
    # Optional wall-clock limit; set by callers before training.
    self._timeout = None
def __init__(self):
    """Build the SSD detector, load pretrained weights, and move it to the device."""
    super(ObjectDetector, self).__init__()
    self.model = None
    self.device = get_device()
    # One extra class for the background label.
    num_classes = len(VOC_CLASSES) + 1
    self.model = self._build_ssd('test', 300, num_classes)  # initialize SSD
    if self.device.startswith("cuda"):
        state = torch.load(self.local_paths[0])
    else:
        # Remap CUDA storages onto the CPU.
        state = torch.load(self.local_paths[0],
                           map_location=lambda storage, loc: storage)
    self.model.load_state_dict(state)
    self.model.eval()
    print('Finished loading model!')
    self.model = self.model.to(self.device)
def __init__(self, verbose, **kwargs):
    """Initialize BERT classifier configuration, tokenizer, and output paths."""
    super().__init__(**kwargs)
    self.verbose = verbose
    self.device = get_device()
    # BERT-specific settings.
    self.bert_model = 'bert-base-uncased'
    self.max_seq_length = 128
    self.tokenizer = BertTokenizer.from_pretrained(self.bert_model,
                                                   do_lower_case=True)
    # Number of target classes; resolved once the data is seen.
    self.num_labels = None
    # Where the fine-tuned weights are written.
    self.output_model_file = os.path.join(self.path, 'pytorch_model.bin')
    # Evaluation parameters.
    self.eval_batch_size = 32
def fit(self, x_train):
    """Train the GAN on unlabeled data.

    Args:
        x_train: ndarray containing the training data.
    """
    # Input sizes never change, so cudnn autotuning is a pure win.
    cudnn.benchmark = True
    self.data_transformer = ImageDataTransformer(x_train,
                                                 augment=self.augment)
    loader = self.data_transformer.transform_train(x_train)
    trainer = GANModelTrainer(self.net_g,
                              self.net_d,
                              loader,
                              binary_classification_loss,
                              self.verbose,
                              self.gen_training_result,
                              device=get_device())
    trainer.train_model()
def __init__(self):
    """Download and load the three MTCNN stage networks onto the device.

    The original body repeated the same load/move/eval sequence for
    PNet, RNet, and ONet; it is factored into a local helper.
    """
    super(FaceDetector, self).__init__()
    self.load()
    self.device = get_device()
    pnet, rnet, onet = [f'{temp_path_generator()}/{file_name}'
                        for file_name in Constant.FACE_DETECTION_PRETRAINED['FILE_NAMES']]

    def _prepare(net, weights_path):
        # Without CUDA, remap the checkpoint's storages to CPU so
        # GPU-trained weights still load on CPU-only machines.
        if torch.cuda.is_available():
            net.load_state_dict(torch.load(weights_path))
        else:
            net.load_state_dict(torch.load(weights_path,
                                           map_location=lambda storage, loc: storage))
        net = net.to(self.device)
        net.eval()
        return net

    self.pnet_detector = _prepare(PNet(), pnet)
    self.rnet_detector = _prepare(RNet(), rnet)
    self.onet_detector = _prepare(ONet(), onet)

    # MTCNN detection hyper-parameters.
    self.min_face_size = 24
    self.stride = 2
    self.threshold = [0.6, 0.7, 0.7]
    self.scale_factor = 0.709
def __init__(self, nz=100, ngf=32, ndf=32, nc=3, verbose=False,
             gen_training_result=None, augment=None):
    """
    Args:
        nz: Size of the latent z vector.
        ngf: Number of generator filters in the first conv layer.
        ndf: Number of discriminator filters in the first conv layer.
        nc: Number of input channels.
        verbose: Whether the search process is printed to stdout.
        gen_training_result: Tuple of (path, size) describing where the
            intermediate result is written and at what size.
        augment: Whether the data needs augmentation; defaults to
            Constant.DATA_AUGMENTATION when None.
    """
    super().__init__(verbose)
    self.nz = nz
    self.ngf = ngf
    self.ndf = ndf
    self.nc = nc
    self.verbose = verbose
    self.device = get_device()
    self.gen_training_result = gen_training_result
    if augment is None:
        augment = Constant.DATA_AUGMENTATION
    self.augment = augment
    self.data_transformer = None
    self.net_d = Discriminator(self.nc, self.ndf)
    self.net_g = Generator(self.nc, self.nz, self.ngf)
def __init__(self):
    """Initialize detector state; the model itself is built/loaded later."""
    super(ObjectDetector, self).__init__()
    self.model = None
    self.device = get_device()