def test_model(self):
    """Smoke-test model generation, training and firing on the fixture data.

    Builds an ANN over the first two feature columns, checks that the
    signal configuration has one entry per feature, trains on the first
    half of the rows and fires on the second half.
    """
    data_set = self._data_set
    features = [0, 1]
    ann = model.generate_model(data_set, 5, 5, features)
    # One signal per requested feature column.
    self.assertEqual(len(ann.signal_configuration), len(features))
    # Fix: floor division keeps `mid` an int under Python 3 as well;
    # `shape[0] / 2` yields a float there, which is not a valid slice index.
    # (Identical result under Python 2 for int operands.)
    mid = data_set.shape[0] // 2
    model.train_ann(ann, data_set[:mid, :], 10, features)
    e = model.fire_ann(ann, data_set[mid:, :], features)
    # Fix: call form of print is valid on both Python 2 and 3;
    # the original `print e` statement is a SyntaxError under Python 3.
    print(e)
def main():
    """Find the top clips in the input video and write each as its own file.

    Reads all settings from the module-level ``options`` object: the Aquila
    autoscale groups, input video, model path, clip count/length and output
    filename template.
    """
    conn = utils.autoscale.MultipleAutoScaleGroups(
        options.aq_groups.split(','))
    predictor = model.predictor.DeepnetPredictor(aquila_connection=conn)
    _log.info('Searching for clips in %s' % options.input)
    mov = cv2.VideoCapture(options.input)
    _log.info('Opening model %s' % options.model)
    mod = model.generate_model(options.model, predictor)
    # NOTE(review): grouping reconstructed from a collapsed source line — it
    # is assumed the scene-detector tuning and weights are applied only when
    # a custom predictor is configured; confirm against the original file.
    if options.custom_predictor is not None:
        mod.clip_finder.custom_predictor = model.load_custom_predictor(
            options.custom_predictor)
        mod.clip_finder.scene_detector.threshold = 30.0
        mod.clip_finder.scene_detector.min_scene_len = 30
        mod.clip_finder.weight_dict['custom'] = 1.0
        mod.clip_finder.weight_dict['valence'] = 1.0
    clips = []
    try:
        predictor.connect()
        # max_len == min_len: clips of exactly options.len.
        clips = mod.find_clips(mov, options.n,
                               max_len=options.len,
                               min_len=options.len)
    finally:
        # Always release the predictor connection, even if clip search fails.
        predictor.shutdown()
    clip_i = 0
    for clip in clips:
        # Insert the clip index before the extension of the output template.
        out_splits = options.output.rpartition('.')
        out_fn = '%s_%i.%s' % (out_splits[0], clip_i, out_splits[2])
        _log.info('Output clip %i with score %f to %s' %
                  (clip_i, clip.score, out_fn))
        clip_i += 1
        writer = imageio.get_writer(out_fn, 'FFMPEG', fps=30.0)
        try:
            for frame in pycvutils.iterate_video(mov, clip.start, clip.end):
                # OpenCV frames are BGR; reverse the channel axis to RGB
                # for imageio/FFMPEG.
                writer.append_data(frame[:, :, ::-1])
        finally:
            writer.close()
def main(options):
    """Entry point: build the Aquila-backed model and process the input(s).

    Exactly one of ``options.video`` (a single file) or
    ``options.video_list`` (a text file with one video path per line) is
    expected to be set.
    """
    _log.info('Loading model')
    group_names = options.autoscale_groups.split(',')
    conn = utils.autoscale.MultipleAutoScaleGroups(group_names)
    conn.get_ip()
    deepnet = predictor.DeepnetPredictor(aquila_connection=conn)
    deepnet.connect()
    try:
        scoring_model = model.generate_model(options.model, deepnet)
        if options.video is not None:
            # Single-video mode: honour the requested output location.
            run_one_video(scoring_model, options.video, options.n,
                          options.output, options.batch)
        elif options.video_list is not None:
            # Batch mode: one path per line, default output naming.
            for line in open(options.video_list):
                run_one_video(scoring_model, line.strip(), options.n,
                              None, options.batch)
    finally:
        # Release the predictor connection no matter how processing ends.
        deepnet.shutdown()
def main_worker(index, opt):
    """Per-process training worker: seeds, (optional) DDP setup, model build,
    optional LR-range search, epoch loop with checkpointing, and inference.

    Args:
        index: GPU/process index; >= 0 selects ``cuda:{index}``.
        opt: parsed options namespace; mutated in place (device, batch size,
             rank, master flag, begin_epoch).
    """
    # Deterministic seeding across python/numpy/torch.
    random.seed(opt.manual_seed)
    np.random.seed(opt.manual_seed)
    torch.manual_seed(opt.manual_seed)

    if index >= 0 and opt.device.type == 'cuda':
        opt.device = torch.device(f'cuda:{index}')

    if opt.distributed:
        # Global rank = node rank * GPUs-per-node + local index.
        opt.dist_rank = opt.dist_rank * opt.ngpus_per_node + index
        dist.init_process_group(backend='nccl',
                                init_method=opt.dist_url,
                                world_size=opt.world_size,
                                rank=opt.dist_rank)
        # Split the global batch and worker threads across local GPUs
        # (thread count rounded up).
        opt.batch_size = int(opt.batch_size / opt.ngpus_per_node)
        opt.n_threads = int(
            (opt.n_threads + opt.ngpus_per_node - 1) / opt.ngpus_per_node)
    opt.is_master_node = not opt.distributed or opt.dist_rank == 0

    model = generate_model(opt)
    if opt.batchnorm_sync:
        assert opt.distributed, 'SyncBatchNorm only supports DistributedDataParallel.'
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    if opt.pretrain_path:
        model = load_pretrained_model(model, opt.pretrain_path, opt.model,
                                      opt.n_finetune_classes)
    if opt.dropout:
        # When fine-tuning from a pretrained net, the replaced FC layer
        # must use the fine-tune class count.
        n_classes = opt.n_classes
        if opt.pretrain_path is not None:
            n_classes = opt.n_finetune_classes
        model = replace_fc_layer(model=model,
                                 dropout_factor=opt.dropout_factor,
                                 n_classes=n_classes)
    if opt.resume_path is not None:
        model = resume_model(opt.resume_path, opt.arch, model)
    model = make_data_parallel(model, opt.distributed, opt.device)
    if opt.pretrain_path:
        # Fine-tuning: only optimize parameters from ft_begin_module on.
        parameters = get_fine_tuning_parameters(model, opt.ft_begin_module)
    else:
        parameters = model.parameters()
    if opt.is_master_node:
        print(model)
    if opt.labelsmoothing:
        criterion = LabelSmoothingCrossEntropy().to(opt.device)
    else:
        criterion = CrossEntropyLoss().to(opt.device)

    if not opt.no_train:
        (train_loader, train_sampler, train_logger, train_batch_logger,
         optimizer, scheduler) = get_train_utils(opt, parameters)
        if opt.resume_path is not None:
            opt.begin_epoch, optimizer, scheduler = resume_train_utils(
                opt.resume_path, opt.begin_epoch, optimizer, scheduler)
            if opt.overwrite_milestones:
                scheduler.milestones = opt.multistep_milestones
    if not opt.no_val:
        val_loader, val_logger = get_val_utils(opt)
    if opt.tensorboard and opt.is_master_node:
        from torch.utils.tensorboard import SummaryWriter
        if opt.begin_epoch == 1:
            tb_writer = SummaryWriter(log_dir=opt.result_path)
        else:
            # Resuming: drop TB events logged after the resume epoch.
            tb_writer = SummaryWriter(log_dir=opt.result_path,
                                      purge_step=opt.begin_epoch)
    else:
        tb_writer = None

    if opt.lr_finder and not opt.no_train and not opt.no_val:
        # LR-range test (Leslie Smith); writes history to JSON and returns
        # WITHOUT training.
        print(
            "Performing Learning Rate Search\nWith Leslie Smith's approach...")
        lr_finder = LRFinder(model, optimizer, criterion, device=opt.device)
        lr_finder.range_test(train_loader,
                             val_loader=val_loader,
                             start_lr=opt.learning_rate,
                             end_lr=opt.lrf_end_lr,
                             num_iter=opt.lrf_num_it,
                             step_mode=opt.lrf_mode)
        lr_finder.plot(log_lr=False)
        with (opt.result_path / 'lr_search.json').open('w') as results_file:
            json.dump(lr_finder.history, results_file, default=json_serial)
        # Restore model/optimizer state mutated by the range test.
        lr_finder.reset()
        return

    prev_val_loss = None
    for i in range(opt.begin_epoch, opt.n_epochs + 1):
        if not opt.no_train:
            if opt.distributed:
                # Reshuffle shards per epoch under DistributedSampler.
                train_sampler.set_epoch(i)
            #current_lr = get_lr(optimizer)
            train_epoch(i, train_loader, model, criterion, optimizer,
                        opt.device, train_logger, train_batch_logger,
                        scheduler, opt.lr_scheduler, tb_writer,
                        opt.distributed)

            if i % opt.checkpoint == 0 and opt.is_master_node:
                save_file_path = opt.result_path / 'save_{}.pth'.format(i)
                save_checkpoint(save_file_path, i, opt.arch, model,
                                optimizer, scheduler)

        if not opt.no_val:
            prev_val_loss = val_epoch(i, val_loader, model, criterion,
                                      opt.device, val_logger, tb_writer,
                                      opt.distributed)

        # Epoch-level scheduler step; 'plateau' needs the validation loss.
        if not opt.no_train and opt.lr_scheduler == 'multistep':
            scheduler.step()
        elif not opt.no_train and opt.lr_scheduler == 'plateau':
            scheduler.step(prev_val_loss)
        elif not opt.no_train and opt.lr_scheduler == 'cosineannealing':
            scheduler.step()

    if opt.inference:
        inference_loader, inference_class_names = get_inference_utils(opt)
        inference_result_path = opt.result_path / '{}.json'.format(
            opt.inference_subset)
        inference.inference(inference_loader, model, inference_result_path,
                            inference_class_names, opt.inference_no_average,
                            opt.output_topk)
# NOTE(review): fragment — the first statements below are the tail of a
# testing function whose `def` is outside this excerpt (the bare `return`
# is not valid at module level); the `__main__` block is also truncated
# after the loss selection.
isave_acc_auc_lst = True
print(
    "Testing: Epoch %d:%dth batch, learning rate %2.6f loss %2.4f, acc %2.4f, auc %2.4f,precision %2.4f,recall %2.4f!"
    % (epoch, i, lr, test_loss, acc, auc, prec, recall))
return max_acc, max_auc, acc_max, auc_max, max_acc_auc, test_loss, isave, pred_target_dict, isave_acc_lst, isave_auc_lst, isave_acc_auc_lst


if __name__ == '__main__':
    # Initialize the opts
    opt = parse_opts()
    # opt.mean = get_mean(1)
    opt.arch = '{}-{}'.format(opt.model_name, opt.model_depth)
    opt.scales = [opt.initial_scale]
    # import pdb;pdb.set_trace()
    model, policies = generate_model(opt)
    model = nn.DataParallel(model.cuda())
    # import pdb;pdb.set_trace()
    # Select the loss by substring of the save directory name.
    # NOTE(review): no final `else` — save_dir names matching none of these
    # leave `loss` unbound; presumably handled (or impossible) upstream.
    if "FP" in opt.save_dir:
        if "FP1" in opt.save_dir:
            loss = FPLoss1()
        else:
            loss = FPLoss()
    elif "RC" in opt.save_dir:
        loss = RCLoss()
    elif "AUCP" in opt.save_dir:
        loss = AUCPLoss(lamb=opt.lam, alpha=opt.alp)
    elif "AUCH" in opt.save_dir:
        print("AUCH")
        loss = AUCHLoss()
def main_worker(index, opt):
    """Per-process training worker (tensorboardX variant): seeds, optional
    DDP setup, model build/resume, epoch loop, and optional inference.

    Args:
        index: GPU/process index; >= 0 selects ``cuda:{index}``.
        opt: parsed options namespace; mutated in place.

    Returns:
        The inference results dict when ``opt.inference`` is set, else ``{}``.
    """
    # Deterministic seeding across python/numpy/torch.
    random.seed(opt.manual_seed)
    np.random.seed(opt.manual_seed)
    torch.manual_seed(opt.manual_seed)
    if index >= 0 and opt.device.type == 'cuda':
        opt.device = torch.device(f'cuda:{index}')
    if opt.distributed:
        # Global rank = node rank * GPUs-per-node + local index.
        opt.dist_rank = opt.dist_rank * opt.ngpus_per_node + index
        dist.init_process_group(backend='nccl',
                                init_method=opt.dist_url,
                                world_size=opt.world_size,
                                rank=opt.dist_rank)
        # Split batch and workers across local GPUs (threads rounded up).
        opt.batch_size = int(opt.batch_size / opt.ngpus_per_node)
        opt.n_threads = int(
            (opt.n_threads + opt.ngpus_per_node - 1) / opt.ngpus_per_node)
    opt.is_master_node = not opt.distributed or opt.dist_rank == 0

    model = generate_model(opt)
    if opt.batchnorm_sync:
        assert opt.distributed, 'SyncBatchNorm only supports DistributedDataParallel.'
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    if opt.pretrain_path:
        model = load_pretrained_model(model, opt.pretrain_path, opt.model,
                                      opt.n_finetune_classes, opt.device)
    if opt.resume_path is not None:
        model = resume_model(opt.resume_path, opt.arch, model, opt.device)
    model = make_data_parallel(model, opt.distributed, opt.device)
    if opt.pretrain_path:
        # Fine-tuning: only optimize parameters from ft_begin_module on.
        parameters = get_fine_tuning_parameters(model, opt.ft_begin_module)
    else:
        parameters = model.parameters()
    if opt.is_master_node:
        print(model)
    criterion = CrossEntropyLoss().to(opt.device)

    if not opt.no_train:
        (train_loader, train_sampler, train_logger, train_batch_logger,
         optimizer, scheduler) = get_train_utils(opt, parameters)
        if opt.resume_path is not None:
            opt.begin_epoch, optimizer, scheduler = resume_train_utils(
                opt.resume_path, opt.begin_epoch, optimizer, scheduler)
            if opt.overwrite_milestones:
                scheduler.milestones = opt.multistep_milestones
    if not opt.no_val:
        val_loader, val_logger = get_val_utils(opt)
    if opt.tensorboard and opt.is_master_node:
        # from torch.utils.tensorboard import SummaryWriter
        from tensorboardX import SummaryWriter
        if opt.begin_epoch == 1:
            tb_writer = SummaryWriter(log_dir=opt.result_path)
        else:
            # Resuming: drop TB events logged after the resume epoch.
            tb_writer = SummaryWriter(log_dir=opt.result_path,
                                      purge_step=opt.begin_epoch)
    else:
        tb_writer = None

    prev_val_loss = None
    for i in range(opt.begin_epoch, opt.n_epochs + 1):
        if not opt.no_train:
            if opt.distributed:
                # Reshuffle shards per epoch under DistributedSampler.
                train_sampler.set_epoch(i)
            current_lr = get_lr(optimizer)
            train_epoch(i, train_loader, model, criterion, optimizer,
                        opt.device, current_lr, train_logger,
                        train_batch_logger, tb_writer, opt.distributed)

            if i % opt.checkpoint == 0 and opt.is_master_node:
                save_file_path = opt.result_path / 'save_{}.pth'.format(i)
                save_checkpoint(save_file_path, i, opt.arch, model,
                                optimizer, scheduler)

        if not opt.no_val:
            prev_val_loss = val_epoch(i, val_loader, model, criterion,
                                      opt.device, val_logger, tb_writer,
                                      opt.distributed)

        # Epoch-level scheduler step; 'plateau' needs the validation loss.
        if not opt.no_train and opt.lr_scheduler == 'multistep':
            scheduler.step()
        elif not opt.no_train and opt.lr_scheduler == 'plateau':
            scheduler.step(prev_val_loss)

    if opt.inference:
        inference_loader, inference_class_names = get_inference_utils(opt)
        inference_result_path = opt.result_path / '{}.json'.format(
            opt.inference_subset)
        inference_results = inference.inference(inference_loader, model,
                                                inference_result_path,
                                                inference_class_names,
                                                opt.inference_no_average,
                                                opt.output_topk, opt.device)
        return inference_results
    return {}
# NOTE(review): fragment — the opening `if` of this if/else (which chooses
# between the sequence-list and single-sequence dataset) is outside this
# excerpt, so the leading branch is shown without its condition.
    test_loader = torch.utils.data.DataLoader(
        RoadSequenceDatasetList(file_path=config.test_path,
                                transforms=op_tranforms),
        batch_size=args.test_batch_size, shuffle=False, num_workers=1)
else:
    test_loader = torch.utils.data.DataLoader(
        RoadSequenceDataset(file_path=config.test_path,
                            transforms=op_tranforms),
        batch_size=args.test_batch_size, shuffle=False, num_workers=1)

# load model and weights
model = generate_model(args)
# Class weights compensate for label imbalance in the cross-entropy loss.
class_weight = torch.Tensor(config.class_weight)
criterion = torch.nn.CrossEntropyLoss(weight=class_weight).to(device)
pretrained_dict = torch.load(config.pretrained_path)
model_dict = model.state_dict()
# Keep only checkpoint entries whose keys exist in this model, then merge —
# tolerates architecture differences between checkpoint and model.
pretrained_dict_1 = {
    k: v
    for k, v in pretrained_dict.items() if (k in model_dict)
}
model_dict.update(pretrained_dict_1)
model.load_state_dict(model_dict)
# output the result pictures
output_result(model, test_loader, device)
# calculate the values of accuracy, precision, recall, f1_measure
# NOTE(review): fragment — the first line is the tail of a function defined
# above this excerpt, and the inference loop below is truncated.
    return captions, final_preds, urls


# load data
data = loadData(base_dir = directory+"annotations" )
TOTAL_INFERENCE_STEP = 10000
BATCH_SIZE_INFERENCE = 32
# Hypothesis/reference transcripts are appended across runs ("a" mode).
hyp = open(directory+"hyp.txt", "a")
ref1 = open(directory+"ref1.txt", "a")
# Build the TensorFlow graph and train it
g = tf.Graph()
with g.as_default():
    # Build the model.
    model = generate_model(mode, inference_batch = BATCH_SIZE_INFERENCE)
    # run training
    init = tf.global_variables_initializer()
    with tf.Session() as sess:
        sess.run(init)
        # Restore weights from the fixed checkpoint; note the typo
        # "restured" in the runtime message below is left as-is.
        model['saver'].restore(
            sess, directory + "savedSession/model1.0_checkpoint160000.ckpt")
        print("Model restured! Last step run: ",
              sess.run(model['global_step']))
        for i in range(TOTAL_INFERENCE_STEP):
            orig_pred, captions_pred, urls = test_rnn(
                sess, data, BATCH_SIZE_INFERENCE, model, 1.0)
            # the output is size (32, 16)
            # Reshape each prediction to a column, then stack side-by-side.
            captions_pred = [unpack.reshape(-1, 1) for unpack in captions_pred]
            captions_pred = np.concatenate(captions_pred, 1)
# NOTE(review): fragment — begins inside an `if opt.resume_path:` (or
# similar) whose condition is outside this excerpt, and is truncated
# mid-`elif` at the end.
    opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
if opt.pretrain_path:
    opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
# Build the multi-scale pyramid: each scale is the previous times scale_step.
opt.scales = [opt.initial_scale]
for i in range(1, opt.n_scales):
    opt.scales.append(opt.scales[-1] * opt.scale_step)
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
opt.std = get_std(opt.norm_value)
print(opt)
# Persist the resolved options next to the results for reproducibility.
with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
    json.dump(vars(opt), opt_file)
torch.manual_seed(opt.manual_seed)
feature_model, _ = generate_model(opt)
print(feature_model)
# Feature extraction only — no gradient updates, so switch to eval mode.
feature_model.eval()
# Choose input normalization: identity, mean-only, or mean+std.
if opt.no_mean_norm and not opt.std_norm:
    norm_method = Normalize([0, 0, 0], [1, 1, 1])
elif not opt.std_norm:
    norm_method = Normalize(opt.mean, [1, 1, 1])
else:
    norm_method = Normalize(opt.mean, opt.std)
if not opt.no_train:
    assert opt.train_crop in ['random', 'corner', 'center']
    if opt.train_crop == 'random':
        crop_method = MultiScaleRandomCrop(opt.scales, opt.sample_size)
    elif opt.train_crop == 'corner':
# NOTE(review): fragment — the ArgumentParser is created above this excerpt
# and the spatial_transform Compose list is truncated at the end.
args = parser.parse_args()
# Hard-coded overrides of the parsed arguments.
args.resnet_shortcut = 'A'
args.no_cuda = False
args.pretrain_path = args.model_path
# Dataset-specific class counts and frame-file prefixes.
if args.dataset == 'ucf101':
    num_class = 101
    args.n_classes = 101
    img_prefix = 'image_'
else:
    num_class = 174
    args.n_classes = 174
    img_prefix = ''
# Two independent instances of the same architecture.
first_model, parameters_1 = generate_model(args)
second_model, parameters_2 = generate_model(args)
print(first_model)
# NOTE(review): interactive pause — blocks until the user presses Enter;
# presumably a leftover debugging aid, confirm before running unattended.
input('...')
# Choose input normalization: identity, mean-only, or mean+std.
if args.no_mean_norm and not args.std_norm:
    norm_method = Normalize([0, 0, 0], [1, 1, 1])
elif not args.std_norm:
    norm_method = Normalize(args.mean, [1, 1, 1])
else:
    norm_method = Normalize(args.mean, args.std)
spatial_transform = Compose([
    Scale(args.sample_size),
    CenterCrop(args.sample_size),
    ToTensor(args.norm_value),
    norm_method
def main():
    """Train a model with Adam + BCEWithLogits, checkpointing periodically.

    Reads all configuration from ``parse_opts()``; saves snapshots to the
    ``snapshots/`` directory every ``opt.save_interval`` epochs.
    """
    opt = parse_opts()
    print(opt)
    # Fixed seeds + deterministic cuDNN for reproducibility.
    seed = 0
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False

    # CUDA for PyTorch
    use_cuda = torch.cuda.is_available()
    device = torch.device(f"cuda:{opt.gpu}" if use_cuda else "cpu")

    # tensorboard
    # NOTE(review): the writer is created but never written to below — all
    # add_scalar calls are commented out.
    summary_writer = tensorboardX.SummaryWriter(log_dir='tf_logs')

    train_transform = transforms.Compose([
        #transforms.RandomCrop(32, padding=3),
        transforms.Resize((224, 224)),
        # transforms.RandomHorizontalFlip(),
        # transforms.RandomRotation(10),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])
    test_transform = transforms.Compose([
        # transforms.RandomHorizontalFlip(),
        # transforms.RandomRotation(10),
        #transforms.RandomCrop(32, padding=3),
        transforms.Resize((224, 224)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ])

    # data loaders
    train_dataset = get_dataset(opt, 'train', transform=train_transform)
    train_loader = DataLoader(train_dataset, batch_size=opt.batch_size,
                              shuffle=True, num_workers=0,
                              collate_fn=train_dataset.my_collate)
    val_dataset = get_dataset(opt, 'test', transform=test_transform)
    val_loader = DataLoader(val_dataset, batch_size=opt.batch_size,
                            shuffle=True, num_workers=0,
                            collate_fn=val_dataset.my_collate)
    print(f'Number of training examples: {len(train_loader.dataset)}')
    print(f'Number of validation examples: {len(val_loader.dataset)}')

    # define model
    model, parameters = generate_model(opt)
    model = model.to(device)
    if torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        model = nn.DataParallel(model)
    # Nesterov momentum requires zero dampening (only relevant to the
    # commented-out SGD alternative below).
    if opt.nesterov:
        dampening = 0
    else:
        dampening = opt.dampening

    # define optimizer and criterion
    optimizer = optim.Adam(parameters)
    # optimizer = optim.SGD(
    #     model.parameters(),
    #     lr=opt.learning_rate,
    #     momentum=opt.momentum,
    #     dampening=dampening,
    #     weight_decay=opt.weight_decay,
    #     nesterov=opt.nesterov)
    # scheduler = lr_scheduler.ReduceLROnPlateau(optimizer, 'min', patience=opt.lr_patience)
    criterion = BCEWithLogitsLoss()

    # pretrained weights
    if opt.weights:
        checkpoint = torch.load(opt.weights)
        # strict=False: tolerate missing/unexpected keys when warm-starting.
        model.load_state_dict(checkpoint['model_state_dict'], strict=False)
        print("Pretrained weights loaded")

    # resume model, optimizer if already exists
    if opt.resume_path:
        checkpoint = torch.load(opt.resume_path)
        model.load_state_dict(checkpoint['model_state_dict'])
        optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        epoch = checkpoint['epoch']
        print("Model Restored from Epoch {}".format(epoch))
        start_epoch = epoch + 1
    else:
        start_epoch = 1

    # start training
    # NOTE(review): `th` is assigned but never used below.
    th = 10000
    for epoch in range(start_epoch, opt.epochs+1):
        # train, test model
        train_loss, train_recall = train(
            model, train_loader, criterion, optimizer, epoch, device, opt)
        # scheduler.step(train_loss)
        if (epoch) % opt.save_interval == 0:
            # NOTE(review): validation and summary writing are disabled;
            # only the checkpoint is saved on this interval.
            #val_loss, val_recall = validate(model, val_loader, criterion, epoch, device, opt)
            # scheduler.step(val_loss)
            # # write summary
            # summary_writer.add_scalar(
            #     'losses/train_loss', train_loss, global_step=epoch)
            # summary_writer.add_scalar(
            #     'losses/val_loss', val_loss, global_step=epoch)
            # summary_writer.add_scalar(
            #     'acc/train_acc', train_recall, global_step=epoch)
            # summary_writer.add_scalar(
            #     'acc/val_acc', val_recall, global_step=epoch)
            state = {'epoch': epoch,
                     'model_state_dict': model.state_dict(),
                     'optimizer_state_dict': optimizer.state_dict()}
            torch.save(state, os.path.join('snapshots', f'model{epoch}.pth'))
            print("Epoch {} model saved!\n".format(epoch))
# NOTE(review): fragment — the leading statements are the tail of a handler
# factory (WrapRequestHandler) defined above this excerpt. Python 2 code
# (print statement, BaseHTTPServer).
        request.send_header("Content-type", "application/json")
        # Allow cross-origin browser clients to call this endpoint.
        request.send_header("Access-Control-Allow-Origin", "*")
        request.end_headers()
        if top_ten_diagnoses_response:
            request.wfile.write(json.dumps(top_ten_diagnoses_response))
        return
    return RequestHandler


if __name__ == '__main__':
    # The model is generated on the server before it starts running the service.
    #
    # Note: Model generation would normally happen in a completely separate
    # environment than the webserver.
    diagnosis_dictionary = open(DICTIONARY_PATH).read().splitlines()
    diagnosis_model = generate_model(diagnosis_dictionary)
    server_class = BaseHTTPServer.HTTPServer;
    HandlerClass = WrapRequestHandler(diagnosis_model, diagnosis_dictionary)
    httpd = server_class((HOST_NAME, PORT_NUMBER), HandlerClass)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    else:
        # NOTE(review): a try/else clause runs only when NO exception was
        # raised, so this message is misleading — it fires when
        # serve_forever() returns normally, not on an unexpected exception.
        print "Unexpected server exception occurred."
    finally:
        httpd.server_close()
def main_worker():
    """Train the CRNN model end-to-end: data, Adam optimizer, resume support,
    per-epoch train/validation, and interval checkpointing with TB summaries.
    """
    opt = parse_opts()
    print(opt)
    # Fixed seeds for reproducibility.
    seed = 1
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

    # CUDA for PyTorch
    device = torch.device(f"cuda:{opt.gpu}" if opt.use_cuda else "cpu")

    # tensorboard
    summary_writer = tensorboardX.SummaryWriter(log_dir='tf_logs')

    # defining model
    model = generate_model(opt, device)
    # get data loaders
    train_loader, val_loader = get_loaders(opt)

    # optimizer
    crnn_params = list(model.parameters())
    optimizer = torch.optim.Adam(crnn_params,
                                 lr=opt.lr_rate,
                                 weight_decay=opt.weight_decay)

    # scheduler = lr_scheduler.ReduceLROnPlateau(
    #     optimizer, 'min', patience=opt.lr_patience)
    criterion = nn.CrossEntropyLoss()

    # resume model
    if opt.resume_path:
        start_epoch = resume_model(opt, model, optimizer)
    else:
        start_epoch = 1

    # start training
    for epoch in range(start_epoch, opt.n_epochs + 1):
        train_loss, train_acc = train_epoch(model, train_loader, criterion,
                                            optimizer, epoch,
                                            opt.log_interval, device)
        val_loss, val_acc = val_epoch(model, val_loader, criterion, device)

        # saving weights to checkpoint
        # NOTE(review): TB summaries are only written on checkpoint epochs,
        # so intermediate epochs leave no trace in tensorboard.
        if (epoch) % opt.save_interval == 0:
            # scheduler.step(val_loss)
            # write summary
            summary_writer.add_scalar('losses/train_loss', train_loss,
                                      global_step=epoch)
            summary_writer.add_scalar('losses/val_loss', val_loss,
                                      global_step=epoch)
            summary_writer.add_scalar('acc/train_acc', train_acc * 100,
                                      global_step=epoch)
            summary_writer.add_scalar('acc/val_acc', val_acc * 100,
                                      global_step=epoch)

            state = {
                'epoch': epoch,
                'state_dict': model.state_dict(),
                'optimizer_state_dict': optimizer.state_dict()
            }
            torch.save(
                state,
                os.path.join('snapshots',
                             f'{opt.model}-Epoch-{epoch}-Loss-{val_loss}.pth'))
            print("Epoch {} model saved!\n".format(epoch))
def load_models(opt):
    """Build the detector and classifier networks described by ``opt``.

    The parsed options carry two parallel sets of fields (``*_det`` for the
    detector, ``*_clf`` for the classifier).  For each model the generic
    option names are overwritten from the corresponding set, paths are
    rooted at ``opt.root_path``, the resolved options are dumped to JSON in
    the result directory, the model is generated and — when a resume path
    is given — restored from its checkpoint.

    Returns:
        tuple: ``(detector, classifier)`` — the two instantiated (and
        possibly checkpoint-restored) models.
    """
    # ---- detector ---------------------------------------------------------
    opt.resume_path = opt.resume_path_det
    opt.pretrain_path = opt.pretrain_path_det
    opt.sample_duration = opt.sample_duration_det
    opt.model = opt.model_det
    opt.model_depth = opt.model_depth_det
    opt.modality = opt.modality_det
    opt.resnet_shortcut = opt.resnet_shortcut_det
    opt.n_classes = opt.n_classes_det
    opt.n_finetune_classes = opt.n_finetune_classes_det
    if opt.root_path != '':
        opt.video_path = os.path.join(opt.root_path, opt.video_path)
        opt.annotation_path = os.path.join(opt.root_path,
                                           opt.annotation_path)
        opt.result_path = os.path.join(opt.root_path, opt.result_path)
        if opt.resume_path:
            opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
        if opt.pretrain_path:
            opt.pretrain_path = os.path.join(opt.root_path,
                                             opt.pretrain_path)
    # Multi-scale pyramid: each scale is the previous times scale_step.
    opt.scales = [opt.initial_scale]
    for i in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    opt.mean = get_mean(opt.norm_value)
    opt.std = get_std(opt.norm_value)
    print(opt)
    with open(os.path.join(opt.result_path, 'opts_det.json'),
              'w') as opt_file:
        json.dump(vars(opt), opt_file)

    torch.manual_seed(opt.manual_seed)
    detector, parameters = generate_model(opt)
    if opt.resume_path:
        print('loading checkpoint {}'.format(opt.resume_path))
        checkpoint = torch.load(opt.resume_path)
        # Guard against restoring a checkpoint from a different architecture.
        assert opt.arch == checkpoint['arch']
        detector.load_state_dict(checkpoint['state_dict'])

    print('Model 1 \n', detector)
    pytorch_total_params = sum(p.numel() for p in detector.parameters()
                               if p.requires_grad)
    print("Total number of trainable parameters: ", pytorch_total_params)

    # ---- classifier -------------------------------------------------------
    # Reset parsed args: re-parse so the path fields joined above are clean
    # before being re-rooted for the classifier.
    opt = parse_opts_online()
    opt.resume_path = opt.resume_path_clf
    opt.pretrain_path = opt.pretrain_path_clf
    opt.sample_duration = opt.sample_duration_clf
    opt.model = opt.model_clf
    opt.model_depth = opt.model_depth_clf
    opt.modality = opt.modality_clf
    opt.resnet_shortcut = opt.resnet_shortcut_clf
    opt.n_classes = opt.n_classes_clf
    opt.n_finetune_classes = opt.n_finetune_classes_clf
    if opt.root_path != '':
        opt.video_path = os.path.join(opt.root_path, opt.video_path)
        opt.annotation_path = os.path.join(opt.root_path,
                                           opt.annotation_path)
        opt.result_path = os.path.join(opt.root_path, opt.result_path)
        if opt.resume_path:
            opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
        if opt.pretrain_path:
            opt.pretrain_path = os.path.join(opt.root_path,
                                             opt.pretrain_path)
    opt.scales = [opt.initial_scale]
    for i in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    # 'ssar' has no depth variants, so its arch label is just the model name.
    if opt.model == 'ssar':
        opt.arch = opt.model
    else:
        opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    opt.mean = get_mean(opt.norm_value)
    opt.std = get_std(opt.norm_value)
    print(opt)
    with open(os.path.join(opt.result_path, 'opts_clf.json'),
              'w') as opt_file:
        json.dump(vars(opt), opt_file)

    torch.manual_seed(opt.manual_seed)
    classifier, parameters = generate_model(opt)
    if opt.resume_path:
        # Fix: log the checkpoint that is actually loaded (opt.resume_path);
        # the original message printed opt.pretrain_path, which is not the
        # file passed to torch.load below.
        print('loading checkpoint {}'.format(opt.resume_path))
        checkpoint = torch.load(opt.resume_path)
        assert opt.arch == checkpoint['arch']
        classifier.load_state_dict(checkpoint['state_dict'])

    print('Model 2 \n', classifier)
    pytorch_total_params = sum(p.numel() for p in classifier.parameters()
                               if p.requires_grad)
    print("Total number of trainable parameters: ", pytorch_total_params)

    return detector, classifier
# NOTE(review): fragment — the leading statements are the tail of a
# prediction-filtering function defined above this excerpt, and the
# capture loop is truncated at the end.
    preds = idx[mask]
    return preds


if __name__ == "__main__":
    opt = parse_opts()
    print(opt)
    data = load_annotation_data(opt.annotation_path)
    class_to_idx = get_class_labels(data)
    # CPU-only inference.
    device = torch.device("cpu")
    print(class_to_idx)
    # Invert the class->index mapping for decoding predictions.
    idx_to_class = {}
    for name, label in class_to_idx.items():
        idx_to_class[label] = name
    model = generate_model(opt, device)
    # model = nn.DataParallel(model, device_ids=None)
    # print(model)
    if opt.resume_path:
        resume_model(opt, model)
    opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
    opt.std = get_std(opt.norm_value)
    model.eval()
    # NOTE(review): hard-coded absolute Windows path to the input video.
    cam = cv2.VideoCapture(
        'D:\\VIDEOS\\SNATCH VIDEOS NUEVOS\\1.mp4')
    clip = []
    frame_count = 0
    while True:
        ret, img = cam.read()
def main():
    """Full KiMoRe training pipeline: frame extraction, config, data split,
    CNN model build, MSE training loop, and loss-curve export to CSV.
    """
    extract_frame = True
    ########################################################################
    # load args and config
    args = parse_opts()
    config = get_config(args.config)
    ########################################################################
    # Extract Frames from videos
    if extract_frame:
        # extract_frames_from_video(config)
        data_loader = KiMoReDataLoader(config)
        data_loader.load_data()
        df = data_loader.df
        max_video_sec = data_loader.max_video_sec
    ########################################################################
    # Fixed PyTorch random seed for reproducible result
    seed = config.getint('random_state', 'seed')
    np.random.seed(seed)
    torch.manual_seed(seed)
    #######################################################################
    # Loads the configuration for the experiment from the configuration file
    if args.model_name == 'cnn':
        model_name = 'cnn'
    else:
        # Only 'cnn' is supported by this script.
        assert False
    num_epochs = config.getint(model_name, 'epoch')
    optimizer = config.get(model_name, 'optimizer')
    learning_rate = config.getfloat(model_name, 'lr')
    test_size = config.getfloat('dataset', 'test_size')
    ########################################################################
    # list all data files
    all_X_list = df['video_name']  # all video file names
    all_y_list = df['clinical TS Ex#1']  # all video labels
    # train, test split
    train_list, test_list, train_label, test_label = train_test_split(
        all_X_list, all_y_list, test_size=test_size, random_state=seed)
    # Obtain the PyTorch data loader objects to load batches of the datasets
    train_loader, valid_loader = get_data_loader(train_list, test_list,
                                                 train_label, test_label,
                                                 model_name, max_video_sec,
                                                 config)
    ########################################################################
    # Define a Convolutional Neural Network, defined in models
    model = generate_model(model_name, config)
    ########################################################################
    # Define the Loss function and optimizer
    criterion = nn.MSELoss()
    # NOTE(review): the `optimizer` string read from config above is
    # overwritten here — Adam is always used regardless of config.
    optimizer = optim.Adam(model.parameters(), lr=learning_rate, eps=1e-4)
    ########################################################################
    # Set up some numpy arrays to store the training/test loss/accuracy
    train_loss = np.zeros(num_epochs)
    val_loss = np.zeros(num_epochs)
    ########################################################################
    # Train the network
    # Loop over the data iterator and sample a new batch of training data
    # Get the output from the network, and optimize our loss function.
    print('Start training {}...'.format(model_name))
    start_time = time.time()
    for epoch in range(num_epochs):  # loop over the dataset multiple times
        total_train_loss = 0.0
        total_epoch = 0
        for i, data in enumerate(train_loader, 0):
            # Get the inputs
            inputs, labels = data
            # labels = normalize_label(labels)  # Convert labels to 0/1
            # Zero the parameter gradients
            optimizer.zero_grad()
            # Forward pass, backward pass, and optimize
            outputs = model(inputs)
            loss = criterion(outputs, labels.float())
            loss.backward()
            optimizer.step()
            # Calculate the statistics
            total_train_loss += loss.item()
            total_epoch += len(labels)
            print('loss = {}'.format(loss.item()))
        # TODO: add print statement
        # Mean loss over the batches of this epoch.
        train_loss[epoch] = float(total_train_loss) / (i + 1)
        val_loss[epoch] = evaluate(model, valid_loader, criterion)
        print("Epoch {}: Train loss: {} | Validation loss: {}".format(
            epoch + 1, train_loss[epoch], val_loss[epoch]))
    print('Finished Training')
    end_time = time.time()
    elapsed_time = end_time - start_time
    print("Total time elapsed: {:.2f} seconds".format(elapsed_time))
    # Save the model to a file
    torch.save(model.state_dict(), model_name)
    # Write the train/test loss/err into CSV file for plotting later
    epochs = np.arange(1, num_epochs + 1)
    model_path = model_name
    df = pd.DataFrame({"epoch": epochs, "train_loss": train_loss})
    df.to_csv("train_loss_{}.csv".format(model_path), index=False)
    df = pd.DataFrame({"epoch": epochs, "val_loss": val_loss})
    df.to_csv("val_loss_{}.csv".format(model_path), index=False)
    generate_result_plots(model_name)
# NOTE(review): fragment — begins inside an `if opt.resume_path:` (or
# similar) whose condition is outside this excerpt, and is truncated
# mid-branch at the end.
    opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
if opt.pretrain_path:
    opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
# Build the multi-scale pyramid: each scale is the previous times scale_step.
opt.scales = [opt.initial_scale]
for i in range(1, opt.n_scales):
    opt.scales.append(opt.scales[-1] * opt.scale_step)
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
opt.std = get_std(opt.norm_value)
print(opt)
# Persist the resolved options next to the results for reproducibility.
with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
    json.dump(vars(opt), opt_file)
torch.manual_seed(opt.manual_seed)
model, parameters = generate_model(opt)
print(model)
criterion = nn.CrossEntropyLoss()
if not opt.no_cuda:
    criterion = criterion.cuda()
# Choose input normalization: identity, mean-only, or mean+std.
if opt.no_mean_norm and not opt.std_norm:
    norm_method = Normalize([0, 0, 0], [1, 1, 1])
elif not opt.std_norm:
    norm_method = Normalize(opt.mean, [1, 1, 1])
else:
    norm_method = Normalize(opt.mean, opt.std)
if not opt.no_train:
    assert opt.train_crop in ['random', 'corner', 'center']
    if opt.train_crop == 'random':
# NOTE(review): fragment — the opening `if` (training mode, which builds
# dataloader_train/dataloader_val) lies outside this excerpt, so the
# leading branch is shown without its condition.
                                         temporal_transform=temporal_transform_test)
    dataloader_val = DataLoader(dataset_val, batch_size=args.batch_size_val,
                                num_workers=args.num_workers,
                                pin_memory=True)
else:
    print('Loading testing data.....')
    class_id1 = [i for i in range(1, 41)]
    dataset_test = dataset_EgoGesture.dataset_video(
        annot_dir, 'test',
        spatial_transform=trans_test,
        temporal_transform=temporal_transform_test)
    dataloader_test = DataLoader(dataset_test,
                                 batch_size=args.batch_size_val,
                                 num_workers=args.num_workers,
                                 pin_memory=True)

model, parameters = generate_model(args)
model.to(device)

if args.is_train:
    # Input channel count depends on the modality: RGB=3, Depth=1, RGB-D=4.
    if args.modality == 'RGB':
        summary(model, (3, args.clip_len, 112, 112))
    elif args.modality == 'Depth':
        summary(model, (1, args.clip_len, 112, 112))
    elif args.modality == 'RGB-D':
        summary(model, (4, args.clip_len, 112, 112))
    model_train(model, save_dir, dataloader_train, dataloader_val)
    # NOTE(review): interactive debugger breakpoint left in place.
    pdb.set_trace()
else:
    model_test(model, model_test_dir,
               '{}-{}-{}.pth'.format(args.arch, args.clip_len,
                                     args.modality),
               dataloader_test, args.n_finetune_classes)
    # NOTE(review): interactive debugger breakpoint left in place.
    pdb.set_trace()
# NOTE(review): fragment — the leading statements are the tail of a handler
# factory (WrapRequestHandler) defined above this excerpt. Python 2 code
# (print statement, BaseHTTPServer). Near-duplicate of another server
# snippet in this file.
        request.send_header("Content-type", "application/json")
        # Allow cross-origin browser clients to call this endpoint.
        request.send_header("Access-Control-Allow-Origin", "*")
        request.end_headers()
        if top_ten_diagnoses_response:
            request.wfile.write(json.dumps(top_ten_diagnoses_response))
        return
    return RequestHandler


if __name__ == '__main__':
    # The model is generated on the server before it starts running the service.
    #
    # Note: Model generation would normally happen in a completely separate
    # environment than the webserver.
    diagnosis_dictionary = open(DICTIONARY_PATH).read().splitlines()
    diagnosis_model = generate_model(diagnosis_dictionary)
    server_class = BaseHTTPServer.HTTPServer
    HandlerClass = WrapRequestHandler(diagnosis_model, diagnosis_dictionary)
    httpd = server_class((HOST_NAME, PORT_NUMBER), HandlerClass)
    try:
        httpd.serve_forever()
    except KeyboardInterrupt:
        pass
    else:
        # NOTE(review): a try/else clause runs only when NO exception was
        # raised, so this message is misleading — it fires when
        # serve_forever() returns normally, not on an unexpected exception.
        print "Unexpected server exception occurred."
    finally:
        httpd.server_close()
# NOTE(review): fragment — the ArgumentParser (and its earlier arguments,
# including --model) is created above this excerpt. Command-line options and
# driver for 3D feature extraction from a pretrained ResNeXt-101.
parser.add_argument('--model_depth', type=int, default=101)
parser.add_argument('--pretrain_path', type=str,
                    default='./checkpoints/resnext-101-kinetics.pth')
parser.add_argument('--n_classes', type=int, default=400)
parser.add_argument('--n_finetune_classes', type=int, default=400)
parser.add_argument('--ft_begin_index', type=int, default=0)
parser.add_argument('--resnet_shortcut', type=str, default='B')
parser.add_argument('--resnext_cardinality', type=int, default=32)
parser.add_argument('--sample_size', type=int, default=112)
parser.add_argument('--sample_duration', type=int, default=16)
# NOTE(review): `type=bool` on argparse options is a known trap — any
# non-empty string (including "False") parses as True; confirm intent.
parser.add_argument('--no_cuda', type=bool, default=False)
parser.add_argument('--no_train', type=bool, default=True)
parser.add_argument('--file_path', type=str, default='./Data')
parser.add_argument('--dataset_name', type=str, default='YouTubeClips')
parser.add_argument('--frame_per_video', type=int, default=28)
# start_idx/end_idx slice the video list so extraction can be sharded.
parser.add_argument('--start_idx', type=int, default=0)
parser.add_argument('--end_idx', type=int, default=1)
parser.add_argument('--batch_size', type=int, default=1)
opt = parser.parse_args()
opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
model, _ = generate_model(opt)
namelist = os.listdir(os.path.join(opt.file_path, opt.dataset_name))
save_path = os.path.join(opt.file_path, 'Feature_3D')
extract_feats(opt.file_path, model, namelist[opt.start_idx:opt.end_idx],
              opt.frame_per_video, opt.batch_size, save_path)
def main_worker(index, opt):
    """Per-process training worker (debug variant).

    NOTE(review): an unconditional `return` after the first training batch
    makes everything below it (SyncBatchNorm setup, the real train/val loop,
    and inference) unreachable.  This appears to be deliberate debugging
    scaffolding for inspecting model shapes / rendering the graph with
    torchviz `make_dot` -- confirm before removing.
    """
    # Seed all RNGs for reproducibility across workers.
    random.seed(opt.manual_seed)
    np.random.seed(opt.manual_seed)
    torch.manual_seed(opt.manual_seed)

    # In distributed mode each spawned worker owns its own CUDA device.
    if index >= 0 and opt.device.type == 'cuda':
        opt.device = torch.device(f'cuda:{index}')

    # Rank 0 (or non-distributed) is the master node for logging/checkpoints.
    opt.is_master_node = not opt.distributed or opt.dist_rank == 0

    model = generate_model(opt)
    print('after generating model:', model.fc.in_features, ':',
          model.fc.out_features)
    print('feature weights:', model.fc.weight.shape, ':', model.fc.bias.shape)
    if opt.resume_path is not None:
        model = resume_model(opt.resume_path, opt.arch, model)
        print('after resume model:', model.fc.in_features, ':',
              model.fc.out_features)
        print('feature weights:', model.fc.weight.shape, ':',
              model.fc.bias.shape)
    # summary(model, input_size=(3, 112, 112))
    # if opt.pretrain_path:
    #     model = load_pretrained_model(model, opt.pretrain_path, opt.model,
    #                                   opt.n_finetune_classes)
    print('after pretrained model:', model.fc.in_features, ':',
          model.fc.out_features)
    print('feature weights:', model.fc.weight.shape, ':', model.fc.bias.shape)
    print(torch_summarize(model))
    # parameters = model.parameters()
    # for name, param in model.named_parameters():
    #     if param.requires_grad:
    #         print(name, param.data)
    # summary(model, (3, 112, 112))
    # return
    # print('model parameters shape', parameters.shape)

    (train_loader, train_sampler, train_logger, train_batch_logger,
     optimizer, scheduler) = get_train_utils(opt, model.parameters())
    # Debug: run a single batch, print tensor shapes and render the graph.
    for i, (inputs, targets) in enumerate(train_loader):
        print('input shape:', inputs.shape)
        print('targets shape:', targets.shape)
        outputs = model(inputs)
        print("output shape", outputs.shape)
        model_arch = make_dot(outputs, params=dict(model.named_parameters()))
        print(model_arch)
        model_arch.render("/apollo/data/model.png", format="png")
        # Source(model_arch).render('/apollo/data/model.png')
        # print("generating /apollo/data/model.png")
        break
    # make_dot(yhat, params=dict(list(model.named_parameters()))).render("rnn_torchviz", format="png")
    return

    # ------------------------------------------------------------------
    # NOTE(review): UNREACHABLE from here on (see docstring).
    # ------------------------------------------------------------------
    if opt.batchnorm_sync:
        assert opt.distributed, 'SyncBatchNorm only supports DistributedDataParallel.'
        model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
    if opt.pretrain_path:
        model = load_pretrained_model(model, opt.pretrain_path, opt.model,
                                      opt.n_finetune_classes)
    if opt.resume_path is not None:
        model = resume_model(opt.resume_path, opt.arch, model)
    model = make_data_parallel(model, opt.distributed, opt.device)
    # When fine-tuning from a pretrain checkpoint, only tune from
    # ft_begin_module onwards; otherwise optimize everything.
    if opt.pretrain_path:
        parameters = get_fine_tuning_parameters(model, opt.ft_begin_module)
    else:
        parameters = model.parameters()
    if opt.is_master_node:
        print(model)
    criterion = CrossEntropyLoss().to(opt.device)
    if not opt.no_train:
        (train_loader, train_sampler, train_logger, train_batch_logger,
         optimizer, scheduler) = get_train_utils(opt, parameters)
        if opt.resume_path is not None:
            opt.begin_epoch, optimizer, scheduler = resume_train_utils(
                opt.resume_path, opt.begin_epoch, optimizer, scheduler)
            if opt.overwrite_milestones:
                scheduler.milestones = opt.multistep_milestones
    if not opt.no_val:
        val_loader, val_logger = get_val_utils(opt)
    # TensorBoard writer only on the master node; purge stale steps on resume.
    if opt.tensorboard and opt.is_master_node:
        from torch.utils.tensorboard import SummaryWriter
        if opt.begin_epoch == 1:
            tb_writer = SummaryWriter(log_dir=opt.result_path)
        else:
            tb_writer = SummaryWriter(log_dir=opt.result_path,
                                      purge_step=opt.begin_epoch)
    else:
        tb_writer = None

    prev_val_loss = None
    for i in range(opt.begin_epoch, opt.n_epochs + 1):
        if not opt.no_train:
            if opt.distributed:
                # Reshuffle per epoch so every rank sees a new partition.
                train_sampler.set_epoch(i)
            current_lr = get_lr(optimizer)
            train_epoch(i, train_loader, model, criterion, optimizer,
                        opt.device, current_lr, train_logger,
                        train_batch_logger, tb_writer, opt.distributed)
            if i % opt.checkpoint == 0 and opt.is_master_node:
                save_file_path = opt.result_path / 'save_{}.pth'.format(i)
                save_checkpoint(save_file_path, i, opt.arch, model, optimizer,
                                scheduler)
        if not opt.no_val:
            prev_val_loss = val_epoch(i, val_loader, model, criterion,
                                      opt.device, val_logger, tb_writer,
                                      opt.distributed)
        # Scheduler step: multistep after every epoch, plateau on val loss.
        if not opt.no_train and opt.lr_scheduler == 'multistep':
            scheduler.step()
        elif not opt.no_train and opt.lr_scheduler == 'plateau':
            scheduler.step(prev_val_loss)

    if opt.inference:
        inference_loader, inference_class_names = get_inference_utils(opt)
        inference_result_path = opt.result_path / '{}.json'.format(
            opt.inference_subset)
        inference.inference(inference_loader, model, inference_result_path,
                            inference_class_names, opt.inference_no_average,
                            opt.output_topk)
# Cached result files for the three decoding-error experiments.
errors_random_filename = "data/decoding-error-random.npy"
errors_sorted_1_filename = "data/decoding-error-sorted_1.npy"
errors_sorted_2_filename = "data/decoding-error-sorted_2.npy"

# Don't recompute if things have been already saved
if os.path.exists(errors_random_filename):
    errors_random = np.load(errors_random_filename)
    errors_sorted_1 = np.load(errors_sorted_1_filename)
    errors_sorted_2 = np.load(errors_sorted_2_filename)
else:
    # Build memory
    # Reservoir: (1 value + n_gate tick) inputs -> 1000 units -> n_gate outputs.
    n_gate = 1
    model = generate_model(shape=(1 + n_gate, 1000, n_gate),
                           sparsity=0.5, radius=0.1,
                           scaling=1.0, leak=1.0,
                           noise=(0, 1e-4, 1e-4))

    # Training data
    n = 25000
    values = np.random.uniform(-1, +1, n)
    # Each gate "ticks" (samples a new value) with probability 0.01 per step.
    ticks = np.random.uniform(0, 1, (n, n_gate)) < 0.01
    train_data = generate_data(values, ticks)

    # Testing data
    n = 2500
    values = smoothen(np.random.uniform(-1, +1, n))
    ticks = np.random.uniform(0, 1, (n, n_gate)) < 0.01
    # Seed the test sequence with the final training output for continuity.
    # NOTE(review): this else-branch continues past the end of this excerpt.
    test_data = generate_data(values, ticks, last=train_data["output"][-1])
# Resume from the saved-model directory when requested.
if opt.resume:
    opt.resume_path = opt.savemodel_path
    opt.resume_file = os.path.join(opt.resume_path, opt.resume_file)
print(opt)
if not os.path.exists(opt.result_path):
    os.makedirs(opt.result_path)
if not os.path.exists(opt.savemodel_path):
    os.makedirs(opt.savemodel_path)
# Persist the options used for this run alongside the results.
with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
    json.dump(vars(opt), opt_file)
torch.manual_seed(opt.manual_seed)
model, parameters = generate_model(opt)  # NOTE: if the training images have a different channel count, the model's input layer must be changed accordingly
print(model)
# Class weights for the loss: class 0 weighted 0.01, class 1 weighted 0.99
# (heavily up-weights the positive class).
a = np.array([0.01, 0.99], dtype=np.float32)
w = torch.from_numpy(a).cuda()
criterion = nn.CrossEntropyLoss(weight=w)
#criterion = nn.NLLLoss(weight=w)
if not opt.no_train:
    # NOTE(review): synthetic random data and a pdb breakpoint are left in
    # here -- debug scaffolding; confirm before shipping.
    train_image = np.random.normal(size=(258, 1, 512, 512))
    import pdb
    pdb.set_trace()
    train_label = np.random.randint(0, 2, size=(258, 512, 512))
    train_image, train_label = torch.from_numpy(
        train_image), torch.from_numpy(train_label)
from model import generate_model
import tensorflow as tf
import numpy as np
import os
import pickle

# Load recorded gameplay data produced by the capture step.
# NOTE(review): pickle.load is only safe on trusted, locally produced files.
filehandler = open("user_data.obj", 'rb')
user_data = pickle.load(filehandler)

# Resume the saved Keras model if one exists, otherwise build a fresh one.
model_file_path = './nn_model.HDF5'
if os.path.exists(model_file_path):
    model = tf.keras.models.load_model(model_file_path)
else:
    model = generate_model()

img = user_data['img']
action = user_data['action']
history_actions = user_data['history_actions']
history_x_pos = user_data['history_x_pos']
history_y_pos = user_data['history_y_pos']

# Train one sample at a time; expand_dims adds the leading batch axis of 1.
# NOTE(review): this excerpt is truncated mid-dict -- the x= mapping continues
# beyond this chunk.
for index, _ in enumerate(img):
    model.train_on_batch(x={
        'img': np.expand_dims(img[index], axis=0),
        'action': np.expand_dims(np.array(history_actions[index]), axis=0),
        'x_position': np.expand_dims(np.array(history_x_pos[index]), axis=0),
        'y_position':
def load_models(opt):
    """Build and load the detector and classifier networks described by `opt`.

    The `*_det` option fields are first copied over the generic fields to
    configure and build the detector; the `*_clf` fields are then copied over
    the same `opt` in place to build the classifier.

    Returns:
        (detector, classifier) -- the two loaded models.
    """
    # --- detector: copy the detector-specific options over the generic ones.
    opt.resume_path = opt.resume_path_det
    opt.pretrain_path = opt.pretrain_path_det
    opt.sample_duration = opt.sample_duration_det
    opt.model = opt.model_det
    opt.model_depth = opt.model_depth_det
    opt.modality = opt.modality_det
    opt.resnet_shortcut = opt.resnet_shortcut_det
    opt.n_classes = opt.n_classes_det
    opt.n_finetune_classes = opt.n_finetune_classes_det
    opt.no_first_lay = opt.no_first_lay_det
    if opt.root_path != '':
        opt.video_path = os.path.join(opt.root_path, opt.video_path)
        opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
        opt.result_path = os.path.join(opt.root_path, opt.result_path)
        if opt.resume_path:
            opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
        if opt.pretrain_path:
            opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
    # Multi-scale testing: geometric sequence of n_scales scales.
    opt.scales = [opt.initial_scale]
    for i in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    opt.mean = get_mean(opt.norm_value)
    opt.std = get_std(opt.norm_value)
    print(opt)
    with open(
            os.path.join(opt.result_path,
                         'opts_det_{}.json'.format(opt.store_name)),
            'w') as opt_file:
        json.dump(vars(opt), opt_file)
    torch.manual_seed(opt.manual_seed)
    detector, parameters = generate_model(opt)
    if opt.resume_path:
        # NOTE(review): resume_path was already joined with root_path above,
        # so this joins it a second time.  os.path.join is a no-op when the
        # second argument is absolute, but with a relative root this doubles
        # the prefix.  The classifier branch below does not repeat the join --
        # confirm which behavior is intended.
        opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
        print('loading checkpoint {}'.format(opt.resume_path))
        checkpoint = torch.load(opt.resume_path)
        assert opt.arch == checkpoint['arch']
        detector.load_state_dict(checkpoint['state_dict'])
    print('Model 1 \n', detector)
    pytorch_total_params = sum(p.numel() for p in detector.parameters()
                               if p.requires_grad)
    print("Total number of trainable parameters: ", pytorch_total_params)

    # --- classifier: overwrite the same generic fields with the *_clf values.
    opt.resume_path = opt.resume_path_clf
    opt.pretrain_path = opt.pretrain_path_clf
    opt.sample_duration = opt.sample_duration_clf
    opt.model = opt.model_clf
    opt.model_depth = opt.model_depth_clf
    opt.modality = opt.modality_clf
    opt.resnet_shortcut = opt.resnet_shortcut_clf
    opt.n_classes = opt.n_classes_clf
    opt.n_finetune_classes = opt.n_finetune_classes_clf
    opt.no_first_lay = opt.no_first_lay_clf
    if opt.root_path != '':
        # NOTE(review): video/annotation/result paths were already joined with
        # root_path in the detector section; joining again is only harmless if
        # the first join produced absolute paths.
        opt.video_path = os.path.join(opt.root_path, opt.video_path)
        opt.annotation_path = os.path.join(opt.root_path, opt.annotation_path)
        opt.result_path = os.path.join(opt.root_path, opt.result_path)
        if opt.resume_path:
            opt.resume_path = os.path.join(opt.root_path, opt.resume_path)
        if opt.pretrain_path:
            opt.pretrain_path = os.path.join(opt.root_path, opt.pretrain_path)
    opt.scales = [opt.initial_scale]
    for i in range(1, opt.n_scales):
        opt.scales.append(opt.scales[-1] * opt.scale_step)
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
    opt.mean = get_mean(opt.norm_value)
    opt.std = get_std(opt.norm_value)
    print(opt)
    with open(
            os.path.join(opt.result_path,
                         'opts_clf_{}.json'.format(opt.store_name)),
            'w') as opt_file:
        json.dump(vars(opt), opt_file)
    torch.manual_seed(opt.manual_seed)
    classifier, parameters = generate_model(opt)
    if opt.resume_path:
        print('loading checkpoint {}'.format(opt.resume_path))
        checkpoint = torch.load(opt.resume_path)
        assert opt.arch == checkpoint['arch']
        # Short clips (and non-c3d models) need a modified first conv layer
        # and the depth-input variant before the checkpoint can be loaded.
        if opt.sample_duration_clf < 32 and opt.model_clf != 'c3d':
            classifier = _modify_first_conv_layer(classifier, 3, 3)
            classifier = _construct_depth_model(classifier)
            classifier = classifier.cuda()
        classifier.load_state_dict(checkpoint['state_dict'])
    print('Model 2 \n', classifier)
    pytorch_total_params = sum(p.numel() for p in classifier.parameters()
                               if p.requires_grad)
    print("Total number of trainable parameters: ", pytorch_total_params)
    return detector, classifier
# NOTE(review): this chunk begins inside the per-image loop of a `test(...)`
# function whose definition lies before this excerpt; indentation below is a
# best-effort reconstruction.
        # Collapse per-class probability maps to a label map (argmax over
        # the class axis).
        mask = np.argmax(mask, axis=0)
        masks.append(mask)

    return masks


if __name__ == '__main__':
    # setting
    sets = parse_opts()
    sets.target_type = "normal"
    sets.phase = 'test'

    # getting model
    checkpoint = torch.load(sets.resume_path)
    net, _ = generate_model(sets)
    net.load_state_dict(checkpoint['state_dict'])

    # data tensor
    testing_data = BrainS18Dataset(sets.data_root, sets.img_list, sets)
    data_loader = DataLoader(testing_data, batch_size=1, shuffle=False,
                             num_workers=1, pin_memory=False)

    # testing; img_list lines are "<image_path> <label_path>".
    img_names = [info.split(" ")[0] for info in load_lines(sets.img_list)]
    masks = test(data_loader, net, img_names, sets)

    # evaluation: calculate dice
    label_names = [info.split(" ")[1] for info in load_lines(sets.img_list)]
    Nimg = len(label_names)
    dices = np.zeros([Nimg, sets.n_seg_classes])
    # NOTE(review): loop body continues past the end of this excerpt.
    for idx in range(Nimg):
def auc(label, pred):
    """ROC AUC for torch tensors: `label` = ground truth, `pred` = scores."""
    fpr, tpr, _ = metrics.roc_curve(y_true=label.cpu().numpy(),
                                    y_score=pred.cpu().numpy())
    return metrics.auc(fpr, tpr)


args = parseOpts()
print(args)


# Minimal option namespace consumed by generate_model (ResNet-50 backbone).
class sets:
    model = 'resnet'
    model_depth = 50


os.environ["CUDA_VISIBLE_DEVICES"] = args.gpus
model = generate_model(sets)
model = model.cuda()
# model = nn.DataParallel(model)

print("Start Initializing Dataset")
ds = NoduleDataset(ct_dir=args.ct_dir,
                   bbox_csv_path=args.bbox_csv,
                   label_csv_path='data/dataset/EGFR/label_simple.csv',
                   skip_missed_npy=True)
print("Finish Initializing Dataset")

# 5-fold cross-validation: integer fold size (remainder images are dropped
# from the folds -- presumably intended; verify).
num_fold = 5
one_fold_size = len(ds) // num_fold
def main():
    """Build a fresh model, train it on the local datasets, and persist it."""
    dataset_dir = pathlib.Path("datasets").resolve()
    net = model.generate_model()
    train(net, dataset_dir)
    model.save_model(net)
# ----------------------------------------------------------------------------- import numpy as np import matplotlib.pyplot as plt from data import generate_data, smoothen from model import generate_model, train_model, test_model if __name__ == '__main__': # Random generator initialization np.random.seed(1) # Build memory n_gate = 3 model = generate_model(shape=(1 + n_gate, 1000, n_gate), sparsity=0.5, radius=0.1, scaling=0.25, leak=1.0, noise=0.0001) # Training data n = 25000 values = np.random.uniform(-1, +1, n) ticks = np.random.uniform(0, 1, (n, n_gate)) < 0.01 train_data = generate_data(values, ticks) # Testing data n = 2500 values = smoothen(np.random.uniform(-1, +1, n)) ticks = np.random.uniform(0, 1, (n, n_gate)) < 0.01 test_data = generate_data(values, ticks, last=train_data["output"][-1])
# apply softmax and move from gpu to cpu outputs = F.softmax(outputs, dim=1).cpu() # get best class score, class_prediction = torch.max(outputs, 1) print(">>>>", score, class_prediction) # As model outputs a index class, if you have the real class list you can get the output class name # something like this: classes = ['jump', 'talk', 'walk', ...] #if classes != None: # return score[0], classes[class_prediction[0]] return score, class_prediction opt = parse_opts() opt.n_input_channels = 3 opt.mean, opt.std = get_mean_std(opt.value_scale, dataset=opt.mean_dataset) model = generate_model(opt) model.fc = nn.Linear(model.fc.in_features, 2) device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu") model.to(device) pretrain_path = '/DATA/disk1/machinelp/3D-ResNets-PyTorch/qpark_action_2/results/save_30.pth' # pretrain_path = '/DATA/disk1/machinelp/3D-ResNets-PyTorch/qpark_action_2/r3d50_KM_200ep.pth' pretrain = torch.load(pretrain_path, map_location='cpu') model.load_state_dict(pretrain['state_dict']) spatial_transform = get_spatial_transform(opt) # predict( clip, model, spatial_transform, classes=2 ) import cv2 # we create the video capture object cap # cap = cv2.VideoCapture(0) # video_path = '/DATA/disk1/libing/online/vlog-ai-server/src/data/input/ch3_20201102191500_20201102192000.mp4'
opt_prune.scales.append(opt_prune.scales[-1] * opt_prune.scale_step) #opt.arch = '{}-{}'.format(opt.model, opt.model_depth) opt_prune.arch = '{}'.format(opt_prune.model) opt_prune.mean = get_mean(opt_prune.norm_value, dataset=opt_prune.mean_dataset) opt_prune.std = get_std(opt_prune.norm_value) opt_prune.store_name = '_'.join([ opt_prune.dataset, opt_prune.model, opt_prune.modality, str(opt_prune.sample_duration) ]) print(opt_prune) torch.manual_seed(opt_prune.manual_seed) #model model model model model model model model model model, parameters = generate_model( opt_prune) #if opt_prune.pretrain_path , 预装模型初始化和加载 print(model) ''' opt_prune = parse_opts() #opt_prune.pretrain_path = '/home/root5/GeScale/choyaa-GeScale-master/my__netslimming+3D/results/SHGD13_sparsity/SHGD_mobilenetv2_IRD_8_best.pth' #===========initialize opt_prune.scales = [opt_prune.initial_scale] for i in range(1, opt_prune.n_scales): opt_prune.scales.append(opt_prune.scales[-1] * opt_prune.scale_step) # opt.arch = '{}-{}'.format(opt.model, opt.model_depth) opt_prune.arch = '{}'.format(opt_prune.model) opt_prune.mean = get_mean(opt_prune.norm_value, dataset=opt_prune.mean_dataset) opt_prune.std = get_std(opt_prune.norm_value) opt_prune.store_name = '_'.join([opt_prune.dataset, opt_prune.model,
# NOTE(review): this chunk begins inside the batch loop of a test function
# whose `def` lies before this excerpt; indentation is a best-effort
# reconstruction.
        output = model(data)
        # NOTE(review): `.data[0]` is the pre-0.4 PyTorch scalar idiom;
        # modern torch requires `.item()` -- confirm the pinned torch version.
        test_loss += criterion(output, target).data[0]
        # get the index of the max
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= _n_test_images
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, _n_test_images,
        100. * correct / _n_test_images))


# setting
sets = parse_opts()
model, parameters = generate_model(sets)

#debug
# print(model)
print(f'type(model):{type(model)}')
print(f'type(parameters):{type(parameters)}')
print(f'type(model.parameters):{type(model.parameters)}')

# freeze the pre-trained convolutional network parameters
for param in parameters['base_parameters']:
    param.requires_grad = False

# ----------------------------------------------------
# Debug:
# for key in parameters.keys():
# NOTE(review): this chunk begins inside an `if args.model_type == ...:` chain
# whose first branch lies before this excerpt; indentation is a best-effort
# reconstruction.
    model_head = resnet.ProjectionHead(args.feature_dim, args.model_depth)
elif args.model_type == 'shufflenet':
    model_head = shufflenet.ProjectionHead(args.feature_dim)
elif args.model_type == 'shufflenetv2':
    model_head = shufflenetv2.ProjectionHead(args.feature_dim)
elif args.model_type == 'mobilenet':
    model_head = mobilenet.ProjectionHead(args.feature_dim)
elif args.model_type == 'mobilenetv2':
    model_head = mobilenetv2.ProjectionHead(args.feature_dim)
if args.use_cuda:
    model_head.cuda()

if args.resume_path == '':
    # ===============generate new model or pre-trained model===============
    model = generate_model(args)
    optimizer = torch.optim.SGD(model.parameters(),
                                lr=args.learning_rate,
                                momentum=args.momentum,
                                dampening=dampening,
                                weight_decay=args.weight_decay,
                                nesterov=args.nesterov)
    # Noise-contrastive estimation objects; len_neg/len_pos come from earlier
    # in the script (outside this excerpt).
    nce_average = NCEAverage(args.feature_dim, len_neg, len_pos, args.tau,
                             args.Z_momentum)
    criterion = NCECriterion(len_neg)
    begin_epoch = 1
    best_acc = 0
    memory_bank = []
else:
    # ===============load previously trained model ===============
    # NOTE(review): this branch continues past the end of this excerpt.
    args.pre_train_model = False
# Multi-scale augmentation: geometric sequence of n_scales scales.
opt.scales = [opt.initial_scale]
for i in range(1, opt.n_scales):
    opt.scales.append(opt.scales[-1] * opt.scale_step)
# MobileNet variants carry no depth suffix in their arch name.
if opt.model == 'mobilenet' or opt.model == 'mobilenetv2':
    opt.arch = opt.model
else:
    opt.arch = '{}-{}'.format(opt.model, opt.model_depth)
opt.mean = get_mean(opt.norm_value, dataset=opt.mean_dataset)
opt.std = get_std(opt.norm_value)
print(opt)
# Persist the options used for this run alongside the results.
with open(os.path.join(opt.result_path, 'opts.json'), 'w') as opt_file:
    json.dump(vars(opt), opt_file)
torch.manual_seed(opt.manual_seed)
model, parameters = generate_model(opt)
print(model)
criterion = nn.CrossEntropyLoss()
if not opt.no_cuda:
    criterion = criterion.cuda()
# Choose input normalization according to the mean/std options:
# no mean norm -> identity; mean only; or full mean/std normalization.
if opt.no_mean_norm and not opt.std_norm:
    norm_method = Normalize([0, 0, 0], [1, 1, 1])
elif not opt.std_norm:
    norm_method = Normalize(opt.mean, [1, 1, 1])
else:
    norm_method = Normalize(opt.mean, opt.std)
if not opt.no_train:
    assert opt.train_crop in ['random', 'corner', 'center']
    # NOTE(review): this branch continues past the end of this excerpt.
    if opt.train_crop == 'random':
from model import generate_model3, generate_model2, generate_model, generate_classifier
from keras.preprocessing import image
import numpy as np
import os
import cv2
import json

input_dirs = []
save_dir = "collection_cut3_2"
save_orig = False
full_save_dir = "full_save_dir"
cutoff = 0.8

# Three independently trained models, each with its own weights file.
# NOTE(review): the numbering is confusing -- `m` is generate_model3() but
# loads "lastBry.h5", while `m3` is generate_model() loading
# "lastChecker3.h5"; confirm the model/weights pairing is intended.
m2 = generate_model2()
m = generate_model3()
m3 = generate_model()
m.load_weights("weights/lastBry.h5")
m2.load_weights("weights/lastNoDropout.h5")
m3.load_weights("weights/lastChecker3.h5")

saving_data = False
save_file = "metadata2.txt"

if not os.path.exists(save_dir):
    print("creating_save_dir")
    os.makedirs(save_dir)
if not os.path.exists(full_save_dir):
    # NOTE(review): message says "creating_save_dir" but this creates
    # full_save_dir -- likely a copy/paste leftover.
    print("creating_save_dir")
    os.makedirs(full_save_dir)


def use_cutoff(imgname, cutoff=0.5):
    # Only JPEG frames are considered; everything else is reported as
    # (None, False).
    # NOTE(review): this definition is truncated at the end of this excerpt.
    if imgname[-3:] != "jpg":
        return None, False
    try: