os.makedirs(output_dir)

# batch-aligned sampler and dataloader over the RoI database
sampler_batch = sampler(train_size, args.batch_size)
dataset = roibatchLoader(roidb)
dataloader = torch.utils.data.DataLoader(dataset, batch_size=args.batch_size,
                                         sampler=sampler_batch,
                                         num_workers=args.num_workers)

# initialize the network here.
if args.net == 'c3d':
    tdcnn_demo = c3d_tdcnn(pretrained=True)
elif args.net == 'i3d':
    tdcnn_demo = i3d_tdcnn(pretrained=True)
elif args.net == 'res34':
    tdcnn_demo = resnet_tdcnn(depth=34, pretrained=True)
elif args.net == 'res50':
    tdcnn_demo = resnet_tdcnn(depth=50, pretrained=True)
else:
    print("network is not defined")
    pdb.set_trace()

tdcnn_demo.create_architecture()
print(tdcnn_demo)

lr = args.lr
#tr_momentum = cfg.TRAIN.MOMENTUM
#tr_momentum = args.momentum
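# NOTE: `sampler` is used above but not defined in this excerpt. Below is a minimal
# sketch of a batch-aligned random sampler, assuming it follows the common
# faster-rcnn.pytorch-style pattern; the project's actual implementation may differ,
# and it would need to be defined before the DataLoader above is built.
import torch
from torch.utils.data.sampler import Sampler

class sampler(Sampler):
    """Yield indices in shuffled batch-sized groups so each batch stays contiguous."""
    def __init__(self, train_size, batch_size):
        self.num_data = train_size
        self.num_per_batch = train_size // batch_size
        self.batch_size = batch_size
        self.range = torch.arange(0, batch_size).view(1, batch_size).long()
        self.leftover_flag = False
        if train_size % batch_size:
            # indices that do not fill a complete batch are appended at the end
            self.leftover = torch.arange(self.num_per_batch * batch_size, train_size).long()
            self.leftover_flag = True

    def __iter__(self):
        # shuffle whole batches, then expand each batch start into per-sample indices
        rand_num = torch.randperm(self.num_per_batch).view(-1, 1) * self.batch_size
        rand_num = rand_num.expand(self.num_per_batch, self.batch_size) + self.range
        rand_num_view = rand_num.view(-1)
        if self.leftover_flag:
            rand_num_view = torch.cat((rand_num_view, self.leftover), 0)
        return iter(rand_num_view)

    def __len__(self):
        return self.num_data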
# initialize the tensor holder here.
#video_data = torch.FloatTensor(1)  #torch.FloatTensor(1, 3, 768, 112, 112)
#gt_twins = torch.FloatTensor(1)

# select the compute device and flag CUDA for the config
device = torch.device("cuda:0" if args.cuda else "cpu")
if args.cuda:
    cfg.CUDA = True

# initialize the network here.
if args.net == 'c3d':
    tdcnn_demo = c3d_tdcnn(class_agnostic=cfg.AGNOSTIC, pretrained=True)
elif args.net == 'i3d':
    tdcnn_demo = i3d_tdcnn(class_agnostic=cfg.AGNOSTIC, pretrained=True)
elif args.net == 'res34':
    tdcnn_demo = resnet_tdcnn(depth=34, class_agnostic=cfg.AGNOSTIC, pretrained=True)
elif args.net == 'res50':
    tdcnn_demo = resnet_tdcnn(depth=50, class_agnostic=cfg.AGNOSTIC, pretrained=True)
else:
    print("network is not defined")
    pdb.set_trace()

tdcnn_demo.create_architecture()

lr = cfg.TRAIN.LEARNING_RATE
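# Illustration only (not from the source script): one common way the pieces above
# are wired together next -- move the network to the selected device and build an
# SGD optimizer from `lr`. cfg.TRAIN.MOMENTUM comes from the commented-out lines
# earlier; per-parameter options such as doubled bias lr or weight decay are omitted.
tdcnn_demo = tdcnn_demo.to(device)

params = [p for p in tdcnn_demo.parameters() if p.requires_grad]
optimizer = torch.optim.SGD(params, lr=lr, momentum=cfg.TRAIN.MOMENTUM)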