def test_loading(train_prep, test_prep, path, training):
    if training:
        dir_names = os.listdir(path)
        dir_names = list(filter(lambda x: os.path.isdir(f"{path}/{x}"), dir_names))  # Filtering waste files
        len_dir_names = len(dir_names)
        images, labels, dir_names = ld.load_data(dir_names, train=True, batch_size=2, sequence_size=15)
        assert len(dir_names) == len_dir_names - 2  # The batch consumed two directories
        assert images.size() == (2, 15, 3, 224, 224)
        assert len(labels) == 2
    else:
        dir_names = os.listdir(path)
        dir_names = list(filter(lambda x: os.path.isdir(f"{path}/{x}"), dir_names))  # Filtering waste files
        images, names = ld.load_data(dir_names, train=False, batch_size=3, sequence_size=10)
        assert images.size() == (3, 10, 3, 224, 224)
        assert len(names) == 3
def predict(self, dir_names, verbose=False, preprocess=False, saving_results=False, input_path='./', output_path='./'):
    if preprocess:
        prep.movies_preprocess(os.listdir(input_path), train=False, n_subclips=1, verbose=verbose)
        dir_names = os.listdir(PREDICTION_PATH)  # Preprocessing regenerates the prediction directories
    dir_names = list(filter(lambda x: os.path.isdir(f"{PREDICTION_PATH}/{x}"), dir_names))  # Filtering waste files
    images, names = load_data(dir_names, train=False, verbose=verbose, batch_size=len(os.listdir(input_path)))
    predictions = self.forward(images).detach()
    if saving_results:
        # output_path should point to a writable file, not a directory
        with open(output_path, 'w') as f:
            for name, score in zip(names, predictions):
                f.write(f"{name} - {score.item()}\n")
    return predictions
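# Example usage (a hypothetical sketch: assumes trained weights saved at
# MODEL_WEIGHTS and already-preprocessed clips under PREDICTION_PATH):
#
#   model = LRCN()
#   model.load_state_dict(torch.load(MODEL_WEIGHTS))
#   scores = model.predict(os.listdir(PREDICTION_PATH), verbose=True,
#                          input_path=PREDICTION_PATH,
#                          saving_results=True, output_path='./predictions.txt')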
def predict_single(self, movie_name, verbose=False):
    print('----------GETTING PREDICTION-----------')
    prep.preprocess(movie_name, train=False, n_subclips=1, verbose=verbose)
    name = movie_name.rsplit('.', 1)[0] + '_0'  # Getting appropriate name
    image, _ = load_data([name], train=False, verbose=verbose, batch_size=1)
    prediction = self.forward(image).detach().item()  # Single-element tensor; .item() extracts the float
    print('---------------------------------------')
    return prediction
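# Example usage (a hypothetical sketch; assumes a trained model and a trailer
# file reachable by prep.preprocess):
#
#   model = LRCN()
#   model.load_state_dict(torch.load(MODEL_WEIGHTS))
#   rating = model.predict_single('some_trailer.mp4', verbose=True)
#   print(f'Predicted rating: {rating:.2f}')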
def train():
    args = get_args()
    if args.preprocessing:
        prep.movies_preprocess(os.listdir(FPATH), train=True, n_subclips=args.n_subclips, verbose=args.verbose)
    model = LRCN(activation=args.activation)
    model.to(args.device)
    dir_names = os.listdir(TRAINING_PATH)
    # Hold out one batch as the test set; the remaining directories are used for training
    X_test, y_test, dir_names = ld.load_data(dir_names, train=True, verbose=args.verbose, batch_size=args.test_batch_size)
    logs = model.fit(dir_names, X_test, y_test,
                     lr=args.learning_rate, loss_name=args.loss, n_epoch=args.n_epoch,
                     batch_size=args.batch_size, device=args.device, verbose=args.verbose,
                     saving_results=args.save_weights)
    if args.logs:
        with open(f'{FPATH}/logs.pkl', 'wb') as f:
            pickle.dump(logs, f)  # Persist [train_loss_history, test_loss_history]
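# Hypothetical command-line invocation (flag names assumed to mirror the
# attributes read from get_args(); check the actual argparse definition):
#
#   python train.py --preprocessing --n_subclips 2 --activation relu \
#       --device cuda --learning_rate 3e-4 --loss mse --n_epoch 5 \
#       --batch_size 10 --test_batch_size 2 --save_weights --logs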
def test_train_predict_single(train_prep, test_prep):
    lrcn = m.LRCN()
    dir_names = os.listdir(TRAINING_PATH)
    dir_names = list(filter(lambda x: os.path.isdir(f"{TRAINING_PATH}/{x}"), dir_names))  # Filtering waste files
    X_test, y_test, dir_names = ld.load_data(dir_names, train=True, verbose=True, batch_size=1)
    logs = lrcn.fit(dir_names, X_test, y_test, n_epoch=2, verbose=True, saving_results=True)
    assert len(logs[0]) == 2
    assert len(logs[1]) == 2
    y = lrcn.predict_single('85.Дикая.mp4', verbose=True)
    assert isinstance(y, float)
def fit(self, dir_names, X_test, y_test, lr=3e-4, loss_name='mse', n_epoch=5, batch_size=10,
        device='cpu', saving_results=False, use_tensorb=False, verbose=False):
    # Activating tensorboard
    # if use_tensorb:
    #     tb = SummaryWriter()
    optimizer = torch.optim.Adam(self.parameters(), lr=lr)
    loss = loss_choice(loss_name)
    dir_names = list(filter(lambda x: os.path.isdir(f"{TRAINING_PATH}/{x}"), dir_names))  # Filtering waste files
    random.shuffle(dir_names)

    train_loss_history = []
    test_loss_history = []
    learning_dir_names = dir_names.copy()

    # Keep the held-out data on the same device as the model
    X_test = X_test.to(device).float()
    y_test = y_test.to(device).float()

    # Training model
    print('---------------TRAINING----------------')
    for epoch in range(n_epoch):
        dir_names = learning_dir_names.copy()
        train_loss = 0
        for i in range(0, len(learning_dir_names), batch_size):
            optimizer.zero_grad()
            # load_data returns the batch plus the directories not yet consumed this epoch
            X_batch, y_batch, dir_names = load_data(dir_names, train=True, verbose=verbose, batch_size=batch_size)
            X_batch = X_batch.to(device).float()
            y_batch = y_batch.to(device).float()
            preds = self.forward(X_batch).view(y_batch.size()[0])
            loss_value = loss(preds, y_batch)
            loss_value.backward()
            train_loss += loss_value.item()
            optimizer.step()
        train_loss_history.append(train_loss)

        with torch.no_grad():
            test_preds = self.forward(X_test).view(y_test.size()[0])
            test_loss_history.append(loss(test_preds, y_test).item())
            # if use_tensorb:
            #     tb.add_scalar(loss_name, test_loss_history[-1], epoch)
        print(f"{epoch+1}: {loss_name} = {test_loss_history[-1]}")

    # if use_tensorb:
    #     tb.close()
    if saving_results:
        torch.save(self.state_dict(), MODEL_WEIGHTS)
    print('---------------------------------------')
    return [train_loss_history, test_loss_history]
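# Example usage (a hypothetical sketch; assumes preprocessed training clips
# under TRAINING_PATH and a held-out batch obtained from load_data):
#
#   model = LRCN(activation='relu')
#   dir_names = os.listdir(TRAINING_PATH)
#   X_test, y_test, dir_names = load_data(dir_names, train=True, batch_size=2)
#   train_hist, test_hist = model.fit(dir_names, X_test, y_test,
#                                     lr=3e-4, loss_name='mse', n_epoch=5,
#                                     batch_size=10, device='cpu')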