# parse the filter-network width spec from its string form
args.fnet_widths = ast.literal_eval(args.fnet_widths)
features = args.nfeatures

# build the graph network from the config string; the default_* kwargs set the
# per-layer filter-network widths, biases, edge-attribute handling and tanh use
model = models.GraphNetwork(args.model_config, features,
                            multigpu=args.multigpu,
                            default_fnet_widths=args.fnet_widths,
                            default_fnet_llbias=args.fnet_llbias,
                            default_edge_attr=args.edge_attr,
                            default_conv_bias=args.conv_bias,
                            default_fnet_tanh=args.fnet_tanh)

print('loading pretrain')
if os.path.isfile(args.pretrain_path):
    # only the model weights are needed here; the epoch, accuracy and
    # optimizer state stored in the checkpoint are discarded
    _, _, model_state, _ = utils.load_checkpoint(args.pretrain_path)
    model.load_state_dict(model_state)
else:
    print('Wrong pretrain path')
    exit()
print(model)

# single-GPU placement; the multi-GPU case is handled via the flag
# passed to GraphNetwork above
if args.cuda and not args.multigpu:
    model = model.to('cuda:0')

label_path = os.path.join(args.dataset_path, args.classname)
if not os.path.isfile(label_path):
    raise RuntimeError("label file does not exist")
label_names = utils.read_string_list(label_path)

# batch_size must split evenly into batch_parts sub-batches
assert args.batch_size % args.batch_parts == 0
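# A minimal sketch of the checkpoint layout the calls above and below assume:
# utils.load_checkpoint returns (epoch, best_meanacc, model_state,
# optimizer_state). The key names here are illustrative guesses, not the
# actual utils API.
def _load_checkpoint_sketch(path):
    import torch  # the full script already depends on PyTorch
    checkpoint = torch.load(path, map_location='cpu')
    # the resume code below expects optimizer_state to be a dict carrying a
    # 'state' entry and the optimizer's own state_dict under 'optimizer'
    return (checkpoint['epoch'], checkpoint['best_meanacc'],
            checkpoint['state_dict'], checkpoint['optimizer_state'])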
    num_workers=args.nworkers, shuffle=False, pin_memory=False)

is_best_meanacc = False
best_meanacc = 0
start_epoch = 0
resume_done = 0

if args.resume:
    checkpoint_path_file = os.path.join(checkpoint_path,
                                        'checkpoint_latest.pth.tar')
    if os.path.isfile(checkpoint_path_file):
        resume_done = 1
        # restore model and optimizer state and continue from the next epoch
        epoch, best_meanacc, model_state, optimizer_state = \
            utils.load_checkpoint(checkpoint_path_file)
        start_epoch = epoch + 1
        model.load_state_dict(model_state)
        if len(optimizer_state['state']) > 0:
            optimizer.load_state_dict(optimizer_state['optimizer'])
        else:
            print('There are problems with the optimizer state')
    else:
        print('Checkpoint does not exist, starting new training')

# transfer learning is only applied when not resuming from a checkpoint
if args.transfer_learning != '' and args.transfer_learning != '-' \
        and resume_done == 0:
    if not os.path.isfile(args.transfer_learning):
        raise RuntimeError("Transfer learning model does not exist")
    model_dict = model.state_dict()
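# The excerpt ends right after grabbing model_dict. A minimal sketch of the
# usual continuation of this transfer-learning pattern, assuming the transfer
# checkpoint has the same 4-tuple layout as the pretrain one; the helper name
# is hypothetical and this is not the original script's code.
def _partial_transfer_sketch(model, transfer_path):
    _, _, pretrained_dict, _ = utils.load_checkpoint(transfer_path)
    model_dict = model.state_dict()
    # keep only parameters whose name and shape match the current model, so
    # mismatched heads (e.g. a different number of classes) are skipped
    pretrained_dict = {k: v for k, v in pretrained_dict.items()
                       if k in model_dict and v.shape == model_dict[k].shape}
    model_dict.update(pretrained_dict)
    model.load_state_dict(model_dict)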