def main(argv):
    parser = argparse.ArgumentParser(description='Cassava Disease Classification')
    parser.add_argument("-m", "--model_path", help="Model path extension.", type=str)
    args = parser.parse_args(argv[1:])
    model_path = args.model_path
    print("Model path is %s" % (model_path))
    print("Program started")
    print("Creating a transform ... ")
    transform = transforms.Compose([
        transforms.ToPILImage(),
        transforms.Resize((C.AVERAGE_HEIGHT, C.AVERAGE_WIDTH)),
        transforms.ToTensor()
        # transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    # transform = None
    print("Creating train data set ... ")
    train_data_set = data_loader.CassavaImagesDataset(C.TRAIN_DATA_PATH, C.TRAIN_LABEL_PATH, transform)
    print("Creating train data loader ... ")
    train_loader = torch.utils.data.DataLoader(train_data_set, batch_size=4, shuffle=True, num_workers=2)
    print("Creating val data set ... ")
    val_data_set = data_loader.CassavaImagesDataset(C.VAL_DATA_PATH, C.VAL_LABEL_PATH, transform)
    print("Creating a val data loader ... ")
    val_loader = torch.utils.data.DataLoader(val_data_set, batch_size=4, shuffle=False, num_workers=2)
    print("Creating a model ... ")
    casava_model = model.ResNet()
    if model_path is not None:
        # if there is a saved model, try to load it
        try:
            casava_model.load_state_dict(torch.load(model_path))
            print("Model %s loaded" % (model_path))
        except Exception as e:
            print("Failed to load model %s. %s" % (model_path, str(e)))
    train.train(train_loader, casava_model)
def main():
    number_of_games = 1
    model_file = './model_1'
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    network = model.ResNet(model.ResidualBlock, [3, 4, 6, 3]).to(device)
    network.load_model(model_file)
    network.eval()
    x = torch.FloatTensor(range(-8, 8 + 1)).to(device)
    env = Curling(0.3)
    rewards = []
    for _ in range(number_of_games):
        state = env.reset()
        for t in range(16):
            inputs = utils.to_input(state[0], t).unsqueeze(0)
            p_out, v_out = network(inputs.to(device))
            topk_actions_idx2coor = []
            topk_actions = p_out.topk(32, 1, True, True)[1][0]
            for t_a in topk_actions:
                t_a = utils.idx_to_action_xy(t_a.item())
                topk_actions_idx2coor.append(t_a)
            if t % 2 == 1:
                # action, highlight = max_exp_shot(env, network, device, topk_actions_idx2coor, t, x)
                # action, highlight = top_1_shot(p_out)
                action, highlight = prob_dist(p_out)
                # if t == 15:
                #     action, highlight = stable_shot(env, network, device, topk_actions_idx2coor, t)
                # else:
                #     action, highlight = max_exp_shot(env, network, device, topk_actions_idx2coor, t, x)
            else:
                action, highlight = prob_dist(p_out)
                # action, highlight = top_1_shot(p_out)
            state, reward, done = env.step(action)
            env.render(True, topk_actions_idx2coor, highlight)
        print(-reward)
        rewards.append(-reward)
    print(rewards)
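# `prob_dist` is not defined in this snippet. A minimal sketch of what a
# policy-sampling helper like it might look like (an assumption, not the
# original implementation): sample one action index from the softmax of the
# policy head and decode it with the same helper used above.
import torch
import torch.nn.functional as F

def prob_dist(p_out):
    probs = F.softmax(p_out, dim=1)           # policy logits -> distribution
    idx = torch.multinomial(probs, 1).item()  # sample a single action index
    action = utils.idx_to_action_xy(idx)      # decode index to shot coordinates
    return action, idx                        # second value assumed to be what env.render highlights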
def load_network(config, resume=True):
    global net
    global net_epoch
    global time_used
    if config.network == 'resnet':
        net = model.ResNet(config)
    elif config.network == 'resnet_v2':
        net = model.ResNet_v2(config)
    elif config.network == 'unet':
        net = model.UNet(config)
    net_epoch = 0
    time_used = 0
    if resume:
        print('-- Loading Parameters')
        assert os.path.isdir('checkpoint'), '-- Error: No Checkpoint Directory Found!'
        checkpoint = torch.load('./checkpoint/network.nn')
        net = checkpoint['net']
        net_epoch = int(checkpoint['epoch'])
        time_used = float(checkpoint['time'])
    if use_cuda:
        net = net.cuda()
    utils.print_network(net)
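# The loader above expects a dict checkpoint with 'net', 'epoch', and 'time'
# keys. A minimal sketch of the matching save routine (inferred from the keys
# read above; not code from the original project):
def save_network(net, epoch, time_used):
    if not os.path.isdir('checkpoint'):
        os.mkdir('checkpoint')
    torch.save({'net': net, 'epoch': epoch, 'time': time_used},
               './checkpoint/network.nn')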
        hdf5_clean=args.valid_hdf_path + 'valid_clean.hdf',
        hdf5_attack=args.valid_hdf_path + 'valid_attack.hdf',
        max_nb_frames=args.n_frames,
        n_cycles=args.valid_n_cycles)
    valid_loader = torch.utils.data.DataLoader(valid_dataset,
                                               batch_size=args.valid_batch_size,
                                               shuffle=False,
                                               worker_init_fn=set_np_randomseed)
else:
    valid_loader = None

if args.model == 'lstm':
    model = model_.cnn_lstm(nclasses=args.n_classes)
elif args.model == 'resnet':
    model = model_.ResNet(nclasses=args.n_classes)
elif args.model == 'resnet_pca':
    model = model_.ResNet_pca(nclasses=args.n_classes)
elif args.model == 'lcnn_9':
    model = model_.lcnn_9layers(nclasses=args.n_classes)
elif args.model == 'lcnn_29':
    model = model_.lcnn_29layers_v2(nclasses=args.n_classes)
elif args.model == 'lcnn_9_pca':
    model = model_.lcnn_9layers_pca(nclasses=args.n_classes)
elif args.model == 'lcnn_29_pca':
    model = model_.lcnn_29layers_v2_pca(nclasses=args.n_classes)
elif args.model == 'lcnn_9_icqspec':
    model = model_.lcnn_9layers_icqspec(nclasses=args.n_classes)
elif args.model == 'lcnn_9_prodspec':
    model = model_.lcnn_9layers_prodspec(nclasses=args.n_classes)
elif args.model == 'lcnn_9_CC':
BATCH = [32]
LR = [0.001]
is_dropout = True
for is_adam in opti:
    for Batch_size in BATCH:
        for learning_rate in LR:
            # set up loaders using t.utils.data.DataLoader and ChallengeDataset objects
            train_DL = t.utils.data.DataLoader(df_train, batch_size=Batch_size, shuffle=True)
            val_DL = t.utils.data.DataLoader(df_val, batch_size=Batch_size)
            print(len(train_DL), len(val_DL))
            # create an instance of our ResNet model
            resnet = model.ResNet()
            # set up a suitable loss criterion (pre-implemented loss functions are in t.nn)
            criteria = t.nn.BCELoss()
            # set up the optimizer (see t.optim)
            path = f'/For_{Batch_size}_{learning_rate}'
            if not os.path.isdir('./Adam'):
                os.mkdir('./Adam')
            if not os.path.isdir('./Final_Models'):
                os.mkdir('./Final_Models')
            # is_adam = False
            if is_adam:
                path = './Adam' + path
                optimizer = t.optim.Adam(resnet.parameters(),
if args.cp_path is None:
    raise ValueError(
        'There is no checkpoint/model path. Use arg --cp-path to indicate the path!')

if os.path.isfile(args.out_path):
    os.remove(args.out_path)
    print(args.out_path + ' Removed')

if args.cuda:
    device = get_freer_gpu()

if args.model_la == 'lstm':
    model_la = model_.cnn_lstm()
elif args.model_la == 'resnet':
    model_la = model_.ResNet()
elif args.model_la == 'resnet_pca':
    model_la = model_.ResNet_pca()
elif args.model_la == 'lcnn_9':
    model_la = model_.lcnn_9layers()
elif args.model_la == 'lcnn_29':
    model_la = model_.lcnn_29layers_v2()
elif args.model_la == 'lcnn_9_pca':
    model_la = model_.lcnn_9layers_pca()
elif args.model_la == 'lcnn_29_pca':
    model_la = model_.lcnn_29layers_v2_pca()
elif args.model_la == 'lcnn_9_icqspec':
    model_la = model_.lcnn_9layers_icqspec()
elif args.model_la == 'lcnn_9_prodspec':
    model_la = model_.lcnn_9layers_prodspec()
elif args.model_la == 'lcnn_9_CC':
import torch
import numpy as np
from tqdm import tqdm

import model
import data_pipe
from data_pipe import offset
from config import get_config

races = ['Caucasian', 'African', 'Asian', 'Indian']
conf = get_config()
conf.meta = True

ir_se101_model = model.ResNet()
ir_se101_model.load_state_dict(torch.load('models/model_ir_se101.pth'))
ir_se101_model.cuda()
ir_se101_model.eval()

loader, class_num = data_pipe.get_train_loader(conf)
total_class = 0
for race in races:
    total_class += class_num[race]

class_count = torch.zeros(total_class)
kernel = torch.zeros(512, total_class)
for race in races:
    for imgs, labels in tqdm(iter(loader[race])):
        imgs = imgs.cuda()
        out = ir_se101_model(imgs).cpu().detach()
        for i in range(len(labels)):
import torch as t
from trainer import Trainer
import sys
import model
import torchvision as tv

epoch = int(sys.argv[1])
# TODO: Enter your model here
model = model.ResNet()
crit = t.nn.BCEWithLogitsLoss()
trainer = Trainer(model, crit)
trainer.restore_checkpoint(epoch)
trainer.save_onnx('checkpoint_{:03d}.onnx'.format(epoch))
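# A quick way to sanity-check the exported file (a sketch; assumes the
# onnxruntime package is installed, and the (1, 3, 224, 224) input shape
# is a placeholder -- adjust it to whatever the ResNet actually consumes):
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession('checkpoint_{:03d}.onnx'.format(epoch))
dummy = np.random.rand(1, 3, 224, 224).astype(np.float32)
outputs = sess.run(None, {sess.get_inputs()[0].name: dummy})
print(outputs[0].shape)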
test_sample = test_data.reshape(-1, 100, 100, 100)
X_test = test_sample.transpose(0, 2, 3, 1)
X_test = X_test.reshape(60, 100, 100, 100, -1)
Y_test = labels_test

import keras
from keras.layers import Dropout, Dense, Conv3D, ZeroPadding3D, Add, Input, AveragePooling3D, MaxPooling3D, Activation, BatchNormalization, Flatten
from keras.models import Model
from keras.initializers import glorot_uniform

X_train = X_train.transpose(0, 2, 3, 1)
X_train = X_train.reshape(70, 100, 100, 100, -1)
Y_train = keras.utils.to_categorical(Y_train, 2)
Y_test = keras.utils.to_categorical(Y_test, 2)

import model
mod = model.ResNet(input_shape=(100, 100, 100, 1), classes=2)
mod.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
mod.summary()
mod.fit(X_train, Y_train, epochs=100, batch_size=8)

# evaluate() returns [loss, accuracy] given the metrics configured above
preds = mod.evaluate(X_test, Y_test)
print('Test loss: %0.4f' % preds[0])
print('Test accuracy: %0.2f%%' % (preds[1] * 100))
import mxnet as mx
import model as res

net = res.ResNet()
# initialize() expects an Initializer instance, not the class itself
net.initialize(init=mx.init.Xavier())
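# A minimal forward-pass sketch against the Gluon API. Gluon defers parameter
# initialization until shapes are known, so the first forward pass below
# triggers it. The (1, 3, 224, 224) input shape is an assumption; adjust it
# to whatever res.ResNet actually consumes.
x = mx.nd.random.uniform(shape=(1, 3, 224, 224))
y = net(x)
print(y.shape)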
def main(args):
    # device configuration
    if args.cpu is not None:
        device = torch.device('cpu')
    elif args.gpu is not None:
        if not torch.cuda.is_available():
            print("GPU / cuda reported as unavailable to torch")
            exit(0)
        device = torch.device('cuda')
    else:
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Create model directory
    if not os.path.exists(args.model_save_dir):
        os.makedirs(args.model_save_dir)

    train_data = ld.get_data(labels_file=args.labels_file,
                             root_dir=args.train_image_dir,
                             mode="absolute")
    validation_data = ld.get_data(labels_file=args.labels_file,
                                  root_dir=args.validation_image_dir,
                                  mode="absolute")
    train_loader = DataLoader(dataset=train_data,
                              batch_size=args.batch_size,
                              shuffle=True)
    val_loader = DataLoader(dataset=validation_data,
                            batch_size=args.validation_batch_size)

    # Build the model
    if args.num_layers is not None and args.block_type is not None:
        if args.block_type == "bottleneck":
            net = model.ResNet(model.Bottleneck, args.num_layers, dropout=args.dropout)
        else:
            net = model.ResNet(model.BasicBlock, args.num_layers, dropout=args.dropout)
    else:
        if args.resnet_model == 152:
            net = model.ResNet152(args.dropout)
        elif args.resnet_model == 101:
            net = model.ResNet101(args.dropout)
        elif args.resnet_model == 50:
            net = model.ResNet50(args.dropout)
        elif args.resnet_model == 34:
            net = model.ResNet34(args.dropout)
        else:
            net = model.ResNet101(args.dropout)

    # load the model to the appropriate device
    net = net.to(device)
    params = net.parameters()

    # Loss and optimizer
    criterion = nn.MSELoss()  # best for regression
    if args.optim == "adadelta":
        optimizer = torch.optim.Adadelta(params, lr=args.learning_rate)
    elif args.optim == "adagrad":
        optimizer = torch.optim.Adagrad(params, lr=args.learning_rate)
    elif args.optim == "adam":
        optimizer = torch.optim.Adam(params, lr=args.learning_rate)
    elif args.optim == "adamw":
        optimizer = torch.optim.AdamW(params, lr=args.learning_rate)
    elif args.optim == "rmsprop":
        optimizer = torch.optim.RMSprop(params, lr=args.learning_rate)
    elif args.optim == "sgd":
        optimizer = torch.optim.SGD(params, lr=args.learning_rate)
    else:
        # default; also covers a missing or unrecognized --optim value
        optimizer = torch.optim.Adam(params, lr=args.learning_rate)

    val_acc_history = []
    train_acc_history = []
    failed_runs = 0
    prev_loss = float("inf")

    for epoch in range(args.num_epochs):
        running_loss = 0.0
        total_loss = 0.0
        for i, (inputs, labels) in enumerate(train_loader, 0):
            net.train()
            # adjust to output image coordinates
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()
            outputs = net(inputs.float())
            loss = criterion(outputs.float(), labels.float())
            loss.backward()
            torch.nn.utils.clip_grad_norm_(net.parameters(), args.clipping_value)
            optimizer.step()
            running_loss += loss.item()
            total_loss += loss.item()
            if i % 2 == 0:  # print every 2 mini-batches
                print('[%d, %5d] loss: %.5f' % (epoch + 1, i + 1, running_loss / 2))
                running_loss = 0.0

        loss = 0.0
        # compute validation loss at the end of the epoch
        for i, (inputs, labels) in enumerate(val_loader, 0):
            inputs, labels = inputs.to(device), labels.to(device)
            net.eval()
            with torch.no_grad():
                outputs = net(inputs.float())
                loss += criterion(outputs, labels.float()).item()

        print("------------------------------------------------------------")
        print("Epoch %5d" % (epoch + 1))
        print("Training loss: {}, Avg Loss: {}".format(
            total_loss, total_loss / len(train_data)))
        print("Validation Loss: {}, Avg Loss: {}".format(
            loss, loss / len(validation_data)))
        print("------------------------------------------------------------")
        val_acc_history.append(loss)
        train_acc_history.append(total_loss)

        # save the model at the desired step
        if (epoch + 1) % args.save_step == 0:
            torch.save(net.state_dict(),
                       args.model_save_dir + "resnet" + str(epoch + 1) + ".pt")

        # stopping conditions
        if failed_runs > 5 and prev_loss < loss:
            break
        elif prev_loss < loss:
            failed_runs += 1
        else:
            failed_runs = 0
        prev_loss = loss

    # create a plot of the loss
    plt.title("Training vs Validation Loss")
    plt.xlabel("Training Epochs")
    plt.ylabel("Loss")
    plt.plot(range(1, len(val_acc_history) + 1), val_acc_history,
             label="Validation loss")
    plt.plot(range(1, len(train_acc_history) + 1), train_acc_history,
             label="Training loss")
    plt.xticks(np.arange(1, len(train_acc_history) + 1, 1.0))
    plt.legend()
    plt.ylim((0, max([max(val_acc_history), max(train_acc_history)])))
    if args.save_training_plot is not None:
        plt.savefig(args.save_training_plot + "loss_plot.png")
    plt.show()
    print('Finished Training')
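# Reloading one of the checkpoints saved above for evaluation (a sketch;
# the constructor arguments must match whatever configuration was trained,
# and the epoch number is a placeholder):
checkpoint_epoch = 10  # hypothetical epoch to restore
net = model.ResNet(model.Bottleneck, args.num_layers, dropout=args.dropout)
net.load_state_dict(torch.load(
    args.model_save_dir + "resnet" + str(checkpoint_epoch) + ".pt"))
net.eval()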
    vis.line(np.asarray([l_a_top1]),
             np.asarray([epoch]),
             win=l_top1_win,
             update='append',
             opts=dict(title="shot_top1"))
    net.train()


if __name__ == '__main__':
    # custom_dataset = CURL('./data', train=False, hammer=True)
    # for i in range(1000):
    #     print(custom_dataset[i][1])
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    network = model.ResNet(model.ResidualBlock, [3, 4, 6, 3]).to(device)
    network.load_model('./model_1')
    optimizer = torch.optim.Adam(network.parameters(), lr=learning_rate,
                                 weight_decay=1e-6)
    criterion = nn.CrossEntropyLoss()
    # [50, 120, 160]
    scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer,
                                                     milestones=[15, 30, 60],
                                                     gamma=0.1)
    # transformations = transforms.Compose([transforms.ToTensor()])
    train_dataset = CURL('./data', train=True, hammer=False)
    train_loader = DataLoader(train_dataset,
loader_train = DataLoader(dataset, batch_size=BATCH_SIZE,
                          sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN)))
loader_train_test = DataLoader(dataset, batch_size=BATCH_SIZE,
                               sampler=sampler.SubsetRandomSampler(random.sample(range(NUM_TRAIN), 1000)))
loader_val = DataLoader(dataset, batch_size=BATCH_SIZE,
                        sampler=sampler.SubsetRandomSampler(range(NUM_TRAIN, 50000)))
loader_test = DataLoader(dataset, batch_size=BATCH_SIZE,
                         sampler=sampler.SubsetRandomSampler(range(50000, 60000)))

dropout = 0
hidden1 = 128
num_classes = 10

# Note: this assignment rebinds the name `model` (previously the imported
# module). It works because the right-hand side is evaluated before the
# binding, so model.ResNet and model.Flatten still resolve to the module here.
model = nn.Sequential(
    nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=1),
    model.ResNet(channels=16, layers=9, same_shape=True),
    model.ResNet(channels=32, layers=9, same_shape=False),
    model.ResNet(channels=64, layers=9, same_shape=False),
    nn.BatchNorm2d(64),
    nn.ReLU(),
    nn.AvgPool2d(kernel_size=8),
    model.Flatten(),
    nn.Linear(64, 10))
nn.init.kaiming_normal_(model[0].weight, nonlinearity='relu')
nn.init.kaiming_normal_(model[8].weight, nonlinearity='relu')
optimizer = optim.SGD(model.parameters(), lr=0.1, momentum=0.9,
                      weight_decay=1e-4, nesterov=True)
if TRAIN:
    train(model, optimizer, epochs=300)
params.isInVideo = False  # Is input video or still image

""" -------------------------------------------------------------------------"""
""" initialization """
trainLoader = dataset.loadTrain(params)
testLoader = dataset.loadTest(params)
device = torch.device("cuda:0" if (
    torch.cuda.is_available() and params.nGPU > 0) else "cpu")

""" -------------------------------------------------------------------------"""
""" Create Networks & Optimizers"""
if params.startEpoch == 0:
    # Create the Classifier
    if params.model == 'custom':
        net = model.Custom(params).to(device)
    elif params.model == 'resnet':
        net = model.ResNet(params).to(device)
    elif params.model == 'custom3D':
        net = model.Custom3D(params).to(device)
    elif params.model == 'resnet3d':
        net = model.ResNet3D(params).to(device)
    elif params.model == 'resnet2p1d':
        net = model.ResNet2p1D(params).to(device)

    if (device.type == 'cuda') and (params.nGPU > 1):
        net = nn.DataParallel(net, list(range(params.nGPU)))

    # Setup Adam optimizer
    optimizer = optim.Adam(net.parameters(), lr=params.lr,
                           betas=(params.beta1, params.beta2))
    )

if os.path.isfile(args.out_path):
    os.remove(args.out_path)
    print(args.out_path + ' Removed')

print('Cuda Mode is: {}'.format(args.cuda))
print('Selected model is: {}'.format(args.model))

if args.cuda:
    device = get_freer_gpu()

if args.model == 'lstm':
    model = model_.cnn_lstm()
elif args.model == 'resnet':
    model = model_.ResNet()
elif args.model == 'resnet_pca':
    model = model_.ResNet_pca()
elif args.model == 'lcnn_9':
    model = model_.lcnn_9layers()
elif args.model == 'lcnn_29':
    model = model_.lcnn_29layers_v2()
elif args.model == 'lcnn_9_pca':
    model = model_.lcnn_9layers_pca()
elif args.model == 'lcnn_29_pca':
    model = model_.lcnn_29layers_v2_pca()
elif args.model == 'lcnn_9_icqspec':
    model = model_.lcnn_9layers_icqspec()
elif args.model == 'lcnn_9_prodspec':
    model = model_.lcnn_9layers_prodspec()
elif args.model == 'lcnn_9_CC':
import tensorflow as tf

import model
import getdata
from parameter import *

data_batch, label_batch = getdata.getTFrecorddata(data_dir, minafterdequeue,
                                                  batchsize, capacity)
pred = model.ResNet(data_batch, weights_dic, biases_dic)
print('Start training!')

# Define loss and optimizer
cost = tf.reduce_sum(
    tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=label_batch))
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost)

# Evaluate model
correct_pred = tf.equal(tf.argmax(pred, 1), tf.argmax(label_batch, 1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

# Initializing the variables
saver = tf.train.Saver()
init = (tf.global_variables_initializer(), tf.local_variables_initializer())

# start a session
with tf.Session() as sess:
    sess.run(init)
    tf.train.start_queue_runners(sess=sess)
    # summary_writer = tf.train.SummaryWriter('/tmp/logs', graph_def=sess.graph_def)
    step = 1
    # Keep training until reach max iterations
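    # A minimal continuation sketch for the loop flagged above (an assumption:
    # `max_iter` and `display_step` are hypothetical names, presumably defined
    # in parameter.py alongside learning_rate and batchsize):
    while step <= max_iter:
        # the queue runners feed data_batch/label_batch, so a bare run suffices
        _, loss_val, acc_val = sess.run([optimizer, cost, accuracy])
        if step % display_step == 0:
            print('Step %d, loss %.4f, accuracy %.4f' % (step, loss_val, acc_val))
        step += 1
    saver.save(sess, './checkpoint/resnet.ckpt')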
dirlist = os.listdir('Test_Data')
dir = {'Caucasian': [], 'African': [], 'Asian': [], 'Indian': []}
for item in dirlist:
    if item.startswith('Caucasian'):
        dir['Caucasian'].append(item)
    elif item.startswith('African'):
        dir['African'].append(item)
    elif item.startswith('Asian'):
        dir['Asian'].append(item)
    elif item.startswith('Indian'):
        dir['Indian'].append(item)

input = dict()
ir_se101_models = dict()
for race in target:
    ir_se101_models[race] = model.ResNet()
    ir_se101_models[race].load_state_dict(
        torch.load('models/model_ir_se101_final_{}.pth'.format(race)))
    # ir_se101_models[race].load_state_dict(torch.load('models/model_ir_se101.pth'.format(race)))
    ir_se101_models[race].cuda()
    ir_se101_models[race].eval()
    input[race] = list(map(get_img, dir[race]))
    input[race] = torch.cat(input[race], 0)
    num = input[race].size()[0]
    with torch.no_grad():
        for i in tqdm(range(0, num, args.batch_size)):
            if i + args.batch_size > num:
                batch = input[race][i:].cuda()