def main(_):
    dataset = cfg.dataset
    input_shape, num_classes = get_dataset_values(dataset, cfg.batch_size)

    tf.logging.info("Initializing CNN for {}...".format(dataset))
    model = VGGNet(input_shape, num_classes, is_training=True)
    tf.logging.info("Finished initialization.")

    # Checkpoints and summaries go to <cfg.logdir>/<model name>/<dataset>
    logdir = os.path.join(cfg.logdir, model.name, dataset)
    os.makedirs(logdir, exist_ok=True)

    sv = tf.train.Supervisor(graph=model.graph, logdir=logdir, save_model_secs=0)

    tf.logging.info("Initializing training...")
    train(model, sv, dataset)
    tf.logging.info("Finished training.")
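# A minimal sketch of the train() helper invoked above, not the original
# implementation: it assumes the model exposes `train_op`, `loss`, and
# `global_step` tensors and that cfg defines `epochs` and `num_batches`
# (none of these are confirmed by the snippet above).
def train(model, sv, dataset):
    with sv.managed_session() as sess:
        for epoch in range(cfg.epochs):
            for step in range(cfg.num_batches):
                if sv.should_stop():
                    return
                _, loss, global_step = sess.run(
                    [model.train_op, model.loss, model.global_step])
                if step % 100 == 0:
                    tf.logging.info("epoch {}, step {}, loss {:.4f}".format(
                        epoch, step, loss))
            # checkpoint once per epoch (save_model_secs=0 disables autosaving)
            sv.saver.save(sess, sv.save_path, global_step=global_step)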
def train(n_epoch, trainloader):
    vgg_model = VGGNet(model=args.model, requires_grad=True)
    model = FCN8s(pretrained_net=vgg_model, n_class=args.n_class)
    model = model.to(device)

    optimizer = torch.optim.SGD(model.parameters(), lr=args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)
    # BCELoss expects probabilities, hence the sigmoid on the network output below
    criterion = nn.BCELoss()

    loss_li = []
    for epoch in range(n_epoch):
        running_loss = 0.0
        for i, sample in enumerate(trainloader):
            images = sample['image'].float().to(device)
            labels = sample['label'].float().to(device)

            optimizer.zero_grad()
            output = torch.sigmoid(model(images))
            loss = criterion(output, labels)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()
            if i % 10 == 9:  # print every 10 mini-batches
                print('Epoch: %d, Loss: %.4f' % (epoch + 1, running_loss / 10))
                loss_li.append(running_loss)
                running_loss = 0.0

    plt.figure()
    plt.plot(loss_li)
    plt.savefig('./running_loss.png')
    return model
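# Usage sketch for train() above; the dataset class, paths, and hyperparameters
# are illustrative assumptions, with samples expected as {'image', 'label'} dicts.
train_dataset = SegmentationDataset(root_dir='./data/train')  # hypothetical Dataset
trainloader = torch.utils.data.DataLoader(train_dataset, batch_size=4,
                                          shuffle=True, num_workers=2)
trained_model = train(n_epoch=50, trainloader=trainloader)
torch.save({'model_state_dict': trained_model.state_dict()}, 'fcn8s_checkpoint.pth')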
for slice_, scan in enumerate(
        ['dwi', 'flair', 't1', 't2', 'label', 'predicted']):
    ax = plt.subplot(1, 6, slice_ + 1)
    show_single_img(scans_stack[:, :, slice_],
                    scan in ('label', 'predicted'))
    plt.tight_layout()
    ax.set_title(scan)
    ax.axis('off')
plt.show()
# pdb.set_trace()
# plt.savefig('testing/' + str(epoch) + '.jpg')

device = torch.device('cuda:0') if torch.cuda.is_available() else torch.device('cpu')

vgg_model = VGGNet(freeze_max=False)
net = FCN8s(vgg_model)
checkpoint = torch.load('baseline.pth', map_location=device)
net.load_state_dict(checkpoint['model_state_dict'])
net.to(device)

# train dataloader
scale = Rescale(int(1.5 * 230))
crop = RandomCrop(224)
rotate = RandomRotate(20.0)
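# A sketch of composing the transforms above into the train dataloader; the
# dataset class, the ToTensor transform, and the paths are assumptions, not
# taken from the snippet above.
train_transform = transforms.Compose([scale, crop, rotate, ToTensor()])
train_dataset = ScanDataset(root_dir='./data/train', transform=train_transform)  # hypothetical Dataset
trainloader = DataLoader(train_dataset, batch_size=4, shuffle=True, num_workers=2)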
                         batch_size=16,
                         shuffle=True,
                         num_workers=0)
num_classes = 16  # len(train_data)
print(num_classes)

# Dataloader: TEST
test_data = LoadImages(img_dir=TEST_IMAGE_DIR,
                       label_dir=TEST_LABEL_DIR,
                       transform=transforms.ToTensor())
testloader = DataLoader(dataset=test_data,
                        batch_size=16,
                        shuffle=True,
                        num_workers=0)

# VGG
vgg_net = VGGNet(1, num_classes=num_classes)
vgg_net = vgg_net.float()  # .to(device)

# Loss
criterion = RMSELoss()
optimizer = optim.Adam(params=vgg_net.parameters(), lr=0.0001)

# Number of iterations
total_samples = len(train_data)
n_iterations = math.ceil(total_samples / 16)
print(total_samples)

# Training
mean_losses = []
for epoch in range(N_EPOCHS):
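    # The epoch body is cut off above; a minimal sketch of what it might look
    # like, assuming the train loader is named `trainloader` and yields
    # (images, labels) batches (these names are illustrative, not from the source).
    epoch_losses = []
    for i, (images, labels) in enumerate(trainloader):
        optimizer.zero_grad()
        outputs = vgg_net(images.float())
        loss = criterion(outputs, labels.float())
        loss.backward()
        optimizer.step()
        epoch_losses.append(loss.item())
    mean_losses.append(sum(epoch_losses) / len(epoch_losses))
    print('Epoch %d/%d, mean loss: %.4f' % (epoch + 1, N_EPOCHS, mean_losses[-1]))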
parser.add_argument('--image', type=str, default='../data/dog.png',
                    help='Input image')
parser.add_argument('--test_timing', type=int, default=0,
                    help='Test timing with multiple forward pass iterations')
args = parser.parse_args()

# Model
print('==> Building model...')
if args.mtype == 'pytorch':
    model = VGGNet('D-DSM', num_classes=args.classes,
                   input_size=args.input)  # depthwise separable

    # Load model
    print('==> Loading PyTorch model...')
    model.load_state_dict(try_load(args.model))
    model.eval()
    model.to(device)
else:
    print('==> Loading Torch Script model...')
    # Load ScriptModule from io.BytesIO object
    with open(args.model, 'rb') as f:
        buffer = io.BytesIO(f.read())
    model = torch.jit.load(buffer, map_location=device)
    # print('[WARNING] ScriptModules cannot be moved to a GPU device yet. Running strictly on CPU for now.')
    # device = torch.device('cpu')  # 'to' is not supported on TracedModules (yet)
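# A minimal single-image inference sketch for either branch above; it assumes
# `from PIL import Image` and `from torchvision import transforms` are available,
# and the preprocessing (resize + ToTensor, no normalization) is an assumption
# that should match how the model was actually trained.
preprocess = transforms.Compose([
    transforms.Resize((args.input, args.input)),
    transforms.ToTensor(),
])
image = preprocess(Image.open(args.image).convert('RGB')).unsqueeze(0).to(device)
with torch.no_grad():
    logits = model(image)
print('==> Predicted class index: {}'.format(logits.argmax(dim=1).item()))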
                    type=str,
                    default='../data/VGG16model.pth',
                    help='Model to trace')
parser.add_argument('--save', type=str, default='../data/VGG16',
                    help='Traced model save path')
args = parser.parse_args()

example_input = torch.rand(1, 3, args.input, args.input)

# TracedModule objects do not inherit the .to() or .eval() methods
if args.mode == 'train':
    print('==> Building model...')
    model = VGGNet('D-DSM', num_classes=args.classes, input_size=args.input)
    # model.to(device)
    model.train()

    # convert to Torch Script
    print('==> Tracing model...')
    traced_model = trace(model, example_input)

    # save model for training
    traced_model.save(args.save + '-traced-train.pt')
else:
    # load "normal" pytorch trained model
    print('==> Building model...')
    model = VGGNet('D-DSM', num_classes=args.classes,
                                         (0.2023, 0.1994, 0.2010)),
])
trainset = datasets.CIFAR10(root='./data', train=True, download=True,
                            transform=transform_train)
train_loader = DataLoader(trainset,
                          batch_size=32 if args.upscale else 128,
                          shuffle=True, num_workers=4)

# Model
print('==> Building model...')
# model = VGGNet('D', num_classes=10, input_size=32)  # VGG16 is configuration D (refer to paper)
model = VGGNet('D-DSM', num_classes=num_classes, input_size=input_size)  # depthwise separable
model = model.to(device)
if device.type == 'cuda':
    cudnn.benchmark = True
    model = torch.nn.DataParallel(model)

# Training
num_epochs = 200
lr = 0.1

# define loss function (criterion) and optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr,
                            momentum=0.9, weight_decay=5e-4)
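# A minimal sketch of the training loop that would follow the setup above,
# assuming plain epoch-wise SGD over train_loader (no LR schedule or test-set
# evaluation shown here).
for epoch in range(num_epochs):
    model.train()
    running_loss = 0.0
    for inputs, targets in train_loader:
        inputs, targets = inputs.to(device), targets.to(device)
        optimizer.zero_grad()
        loss = criterion(model(inputs), targets)
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
    print('Epoch %d/%d, loss: %.4f' %
          (epoch + 1, num_epochs, running_loss / len(train_loader)))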