def main():
    """Entry point: parse CLI arguments, then run each pipeline stage in order."""
    cli_args = _parse_args()
    print(cli_args)
    # Stages run sequentially, each receiving the same parsed arguments.
    for stage in (preprocess, prepare_dataset, test):
        stage(cli_args)
def main(argv=None):
    """Dispatch on FLAGS.mode.

    'preprocess' and 'generate_noisy' are standalone actions; every other mode
    builds the RNN model first and then trains/tests it.

    Args:
        argv: unused; kept for compatibility with app-runner entry points.
    """
    # Fix: use plain `return` instead of `exit()` — `exit()` is the `site`
    # interactive helper (raises SystemExit) and is not meant for program
    # control flow inside a function.
    if FLAGS.mode == 'preprocess':
        prepare_dataset(FLAGS.data_path, FLAGS.subset, FLAGS.split_valid,
                        FLAGS.slice_duration, FLAGS.save_as)
        return
    if FLAGS.mode == 'generate_noisy':
        generate_noisy_signal(FLAGS.song_path)
        return

    # All remaining modes need a constructed and initialized network.
    model = RNN(
        train_dir=FLAGS.data_path + '/train',
        val_dir=FLAGS.data_path + '/valid',
        test_dir=FLAGS.data_path + '/test',
        train_batch_size=FLAGS.train_batch_size,
        valid_batch_size=FLAGS.valid_batch_size,
        test_batch_size=FLAGS.test_batch_size,
        n_inputs=FLAGS.n_inputs,
        seq_length=FLAGS.seq_length,
        num_epochs=FLAGS.num_epochs,
        learning_rate=FLAGS.learning_rate,
        base_dir=FLAGS.base_dir,
        max_to_keep=FLAGS.max_to_keep,
        model_name=FLAGS.model_name
    )
    model.create_network()
    model.initialize_network()

    if FLAGS.mode == 'train':
        model.train_model(FLAGS.display_step, FLAGS.validation_step,
                          FLAGS.checkpoint_step, FLAGS.summary_step)
    elif FLAGS.mode == 'test':
        model.test_model()
    elif FLAGS.mode == 'test_song':
        model.estimate_test_song(FLAGS.noisy_song_path,
                                 FLAGS.output_estimated_path)
def main():
    """Train the gait-sequence autoencoder, save it, and run one test encode/decode."""
    learning_rate = 1e-3
    n_epochs = 10

    # Build the dataset and a shuffled single-sample loader.
    gait_data = GaitSequenceDataset(root_dir=data_dir,
                                    longest_sequence=85,
                                    shortest_sequence=55)
    loader = data.DataLoader(gait_data, batch_size=1, shuffle=True)
    train_dataset, test_dataset = prepare_dataset(loader)

    # Train the autoencoder; `f_loss` is the final reported loss.
    encoder, decoder, embeddings, f_loss = encoding(train_dataset, 32,
                                                    lr=learning_rate,
                                                    epoch=n_epochs,
                                                    logging=True)
    torch.save([encoder, decoder], 'autoencoder_final.pkl')
    print(f_loss)

    #test_set, seq_len, num_features = get_data_dimensions(test_dataset[0:1])
    # Sanity-check a single round trip through the trained networks.
    test_encoding = encoder(test_dataset[0:1].float())
    test_decoding = decoder(test_encoding)
def main():
    """Train the 1-D gait-sequence autoencoder, save it, and run one test encode/decode."""
    batch_size = 1
    learning_rate = 1e-3
    n_epochs = 10

    gait_data = GaitSequenceDataset(root_dir=data_dir,
                                    longest_sequence=120,
                                    shortest_sequence=55)
    loader = data.DataLoader(gait_data, batch_size, shuffle=True)

    #divide data into train and test
    train_dataset, test_dataset = prepare_dataset(loader)

    #embedding and loss of dataset
    encoder, decoder, embeddings, f_loss = encoding_1d(train_dataset,
                                                       lr=learning_rate,
                                                       epoch=n_epochs)
    torch.save([encoder, decoder], 'autoencoder_final.pkl')
    print(f_loss)

    #test_set, seq_len, num_features = get_data_dimensions(test_dataset[0:1])
    # Round-trip a single test sample through the trained networks.
    test_encoding = encoder(test_dataset[0:1].float())
    test_decoding = decoder(test_encoding)
def _build_model(model_name, factories):
    """Instantiate the model named *model_name* from the *factories* mapping.

    Returns the constructed model, or None (after printing a message) when the
    name is not supported for the current mode.
    """
    factory = factories.get(model_name)
    if factory is None:
        print('Sorry. That model currently is not implemented')
        return None
    return factory()


def main():
    """CLI entry point: preprocess audio data, train a model, or score one.

    Behavior is selected by args['mode'] ('preprocess' / 'train' / 'test').
    Refactor: the model-name -> constructor if/elif chain was duplicated
    verbatim between the train and test branches; it is now a dispatch dict
    handled by _build_model.
    """
    args = vars(parser.parse_args())
    if len(sys.argv) == 1:
        parser.print_help(sys.stderr)
        sys.exit(1)

    mode = args['mode']
    if mode == 'preprocess':
        print('args data_path ', args['data_path'])
        print('args hop_length', args['hop_length'])
        # Read audio files once and store them with numpy extension for
        # quicker processing during training.
        # Make PREPARATION_NEEDED=True if dataset is new/changed, else set it False
        prepare_dataset(args['data_path'], args['data_subset'], args['out_dir'],
                        args['processed_csv_dir'],
                        n_fft=args['n_fft'],
                        hop_length=args['hop_length'],
                        slice_duration=args['slice_duration'],
                        n_workers=args['workers'])
    elif mode == 'train':
        # Full model menu is available for training.
        model = _build_model(args['model_name'], {
            'SCUNet': lambda: Generator(1),
            'VggUNet': VggUNet,
            'ResUNet': ResUNet,
            'VRCNet': VRCNet,
            'VCNet': VCNet,
        })
        if model is None:
            return
        # If pre-trained weights are specified, load them:
        if args['pretrained_model']:
            try:
                model.load_state_dict(torch.load(args['pretrained_model']))
            except (UnpicklingError, FileNotFoundError) as e:
                print(e)
                print('The pretrained model path is not correct!')
                return
        # Start training
        train.train(model,
                    model_type=args['model_name'],
                    train_csv=args['data_path'],
                    validation_csv=args['valid_path'],
                    # scheduler=StepLR,
                    use_log_scale=args['log_scale'],
                    gpu=args['gpu'],
                    epochs=args['epochs'],
                    lr=args['lr'],
                    batch_size=args['batch_size'],
                    model_weight_name=args['model_weight_name'],
                    log_dir=args['log_dir'],
                    log_name=args['log_name'],
                    train_info_file=args['train_info_file'],
                    n_workers=args['workers'])
    elif mode == 'test':
        # Evaluation currently supports only this subset of the models.
        model = _build_model(args['model_name'], {
            'SCUNet': lambda: Generator(1),
            'VggUNet': VggUNet,
            'ResUNet': ResUNet,
        })
        if model is None:
            return
        calculate_score(model,
                        model_weights_path=args['model_weight_name'],
                        musdb_dir=args['data_path'],
                        n_workers=args['workers'])
import preprocess as pp
import open3d as o3d
import numpy as np
import copy
import os

# prepare dataset
# Load the ligand and pocket point-cloud models via the project's preprocess module.
source_model = pp.prepare_dataset("../../data/5nix_compound/model_ligand.ply")
target_model = pp.prepare_dataset("../../data/5nix_compound/model_pocket.ply")
# Keep an independent working copy of the ligand model; `source_model` is
# recolored below and `source` gets its own uniform color.
source = copy.deepcopy(source_model)

# change color
pp.change_pcd_color(source_model.pcd, target_model.pcd)
source.pcd.paint_uniform_color([0.91, 0.65, 0.82])  # uniform pink for the working copy

# make initial state of the pose
# NOTE(review): this 4x4 homogeneous transform permutes the axes and adds a
# unit translation — presumably a deliberate initial misalignment so the
# downstream registration has something to correct; confirm intent.
trans_init = np.asarray([[0.0, 0.0, 1.0, 0.0],
                         [1.0, 0.0, 0.0, 1.0],
                         [0.0, 1.0, 0.0, 0.0],
                         [0.0, 0.0, 0.0, 1.0]])
source.pcd.transform(trans_init)

# estimate normal
# assumes the two arguments are (search radius, max neighbors) — TODO confirm
# against preprocess.estimate_normal's signature.
source.estimate_normal(3.1, 471)
target_model.estimate_normal(3.1, 471)

# compute fpfh feature
# FPFH descriptors for both clouds, same radius, fewer neighbors than the
# normal estimation step.
source.calculate_fpfh(3.1, 135)
target_model.calculate_fpfh(3.1, 135)