Esempio n. 1
0
def main():
    """Evaluate the most recent best checkpoint on the test set.

    Loads the newest .h5 weight file from ``c.best_checkpoint_dir``,
    rebuilds the convolutional model, runs it over the whole test set
    batch by batch, and prints the mean batch accuracy.
    """
    # Pick the most recently modified .h5 checkpoint in the directory.
    directory = c.best_checkpoint_dir
    files = sorted(filter(
        lambda f: os.path.isfile(f) and f.endswith(".h5"),
        map(lambda f: os.path.join(directory, f), os.listdir(directory))),
                   key=os.path.getmtime)
    best_model = files[-1]
    print("Loaded %s" % (best_model))

    input_shape = (256, 256, 3)
    model = convolutional_model(input_shape, batch_size=c.batch_size)
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    model.load_weights(best_model)
    test_dataset = load_dataset('test')
    loader_test = loader(test_dataset, c.batch_size)
    # Bug fix: floor division -- range() needs an int (plain / yields a
    # float on Python 3; // is identical for ints on Python 2).
    test_steps = len(test_dataset['label']) // c.batch_size

    accs = []
    for i in range(test_steps):
        # next(gen) works on both Python 2 and 3 generators, unlike the
        # Python-2-only .next() method.
        # NOTE(review): assumes loader() returns an iterator/generator --
        # confirm it does not rely on a custom .next()-only API.
        x_test, y_test = next(loader_test)
        _loss, _acc = model.test_on_batch(x_test, y_test)
        accs.append(_acc)
    print(np.mean(np.array(accs)))
Esempio n. 2
0
def pixel_wise_train(device=0):
	"""Train a U-Net for pixel-wise prediction.

	:param device: GPU device index to run training on.
	"""
	ld = loader(minibatch=100)
	_model = unet()

	def correct_fun(correct, _len):
		# Fraction of correct pixels; *1. forces float division under
		# Python 2 semantics as well.
		return correct.item()*1./(_len)

	savePath = "./save/"

	# Bug fix: forward the caller-supplied device instead of hard-coding 0,
	# otherwise the `device` parameter is silently ignored.
	train(_model, ld, correct_fun=correct_fun, num_epoch=1000, device=device, savePath=savePath)
Esempio n. 3
0
    def __getitem__(self, idx):
        """Return one sample dict: acceleration, camera, depth frame, action index."""
        row = self.dat.iloc[idx]
        record = utils.loader(row.data_path)

        # Float-cast the raw sensor arrays we keep.
        sample = {
            key: record[key].astype("float")
            for key in ['acceleration']
        }
        sample['camera'] = tr_rgb(record['camera'])
        sample['depth_frame'] = tr_depth(record['depth_frame'])
        # Unknown actions map to index 0.
        sample['action'] = action2idx.get(row.action, 0)

        return sample
def test():
    """Evaluate the restored agent on one test batch and print metrics."""
    # Bug fix: Config() must exist before it is used to build the loader
    # (config was previously referenced one line before its assignment).
    config = Config()
    Test_loader = loader(config.test_file, batch_size = config.batch_size, Train=False)
    agent = Agent(config)
    agent.restore()
    test_dic = Test_loader.get_batch()
    test_y = test_dic["labels"]
    test_out, test_loss = agent.test(test_dic)
    test_pred = out_to_predict(test_out)
    # Bug fix: score predictions against the labels (test_y), not the raw
    # network output -- consistent with evaluate(preds, ys) in train().
    precision , recall, accuracy , f1  = evaluate(test_pred , test_y)
    print("Final Test Evaluation")
    print("precision = {}, recall = {}, accuracy = {}, f1 = {}".format(precision , recall, accuracy , f1))
def train():
    """Train the agent over volume-sharded data, checkpointing on dev-loss improvement.

    Runs config.n_epoch epochs; each epoch streams every training volume
    file ("data/<train_file>_vol_<j>.npz"), then evaluates on the dev set
    and saves the agent whenever the average dev loss improves.
    """
    config = Config()
    agent = Agent(config)
    Dev_loader = loader("data/" + config.dev_file + ".npz", batch_size = config.batch_size, Train = False)
    num_dev_batches = Dev_loader.max_batch
    train_losses  = []
    dev_losses = []
    # Sentinel "infinite" loss so the first epoch always checkpoints.
    lowest_loss = 1e+10
    for i in range(config.n_epoch):
        train_loss = 0.0
        total_trained_batches = 0
        # NOTE(review): num_training_volumes is not defined in this function --
        # presumably a module-level constant; confirm it exists at call time.
        for j in range(num_training_volumes):
            Train_loader = loader("data/" + config.train_file + "_vol_" + str(j) + ".npz", batch_size = config.batch_size, Train = True)
            num_train_batches = Train_loader.max_batch
            total_trained_batches += num_train_batches
            for k in range(num_train_batches):
                train_dic = Train_loader.get_batch()
                train_loss+=agent.run_train_step(train_dic)
        avg_train_loss = train_loss/total_trained_batches
        dev_loss = 0.0
        dev_preds = []
        dev_ys = []
        for k in range(num_dev_batches):
            dev_dic = Dev_loader.get_batch()
            dev_batch_out, dev_batch_loss = agent.test(dev_dic)      
            dev_pred = out_to_predict(dev_batch_out, version=2)
            dev_y = dev_dic["labels"]
            dev_ys += list(dev_y)
            dev_preds += list(dev_pred)
            dev_loss += dev_batch_loss
        precision , recall, accuracy , f1  = evaluate(np.array(dev_preds) , np.array(dev_ys))
        avg_dev_loss = dev_loss/num_dev_batches
        logging(i+1, avg_train_loss , avg_dev_loss , precision , recall, accuracy , f1)
        # Save only on dev-loss improvement (best-model selection).
        if avg_dev_loss < lowest_loss:
            agent.save(i+1)
            lowest_loss = avg_dev_loss
        # NOTE(review): this resets only the loader of the *last* training
        # volume; earlier volumes get fresh loaders each epoch anyway.
        Train_loader.reset_loader()
        Dev_loader.reset_loader()
Esempio n. 6
0
def pixel_wise_train_direct(data, mask, device=0):
	"""Train a U-Net directly on in-memory data/mask arrays.

	:param data: input images handed to the loader.
	:param mask: target segmentation masks handed to the loader.
	:param device: GPU device index to run training on.
	"""
	ld = loader(data, mask, minibatch=5)
	_model = unet()

	def correct_fun(correct, _len):
		# Fraction of correct pixels; *1. forces float division under
		# Python 2 semantics as well.
		return correct.item()*1./(_len)

	savePath = "./save/"
	if not os.path.exists(savePath):
		os.mkdir(savePath)
	# savePath = None

	# loss = nn.SmoothL1Loss()
	loss = torch.nn.CrossEntropyLoss()

	# Bug fix: forward the caller-supplied device instead of hard-coding 0,
	# otherwise the `device` parameter is silently ignored.
	train(_model, ld, correct_fun=correct_fun, num_epoch=1000, device=device, savePath=savePath, loss=loss)
Esempio n. 7
0
 def playViewer(self, folder):
     """
     Opens a window and displays all images from a folder.
     The images are loaded asynchronously by a separate thread.
     @param folder: path to folder
     @type folder: str
     """
     if self.dock.isClosed:
         # reinit form
         self.initWins()
     else:
         # clear form
         self.listWdg.clear()
     # build generator:
     # withsub toggles recursion into subfolders based on the menu action.
     fileListGen = self.doGen(folder, withsub=self.actionSub.isChecked())
     self.dock.setWindowTitle(folder)
     self.newWin.showMaximized()
     # launch loader instance
     # NOTE(review): loader appears to be a thread-like image loader that
     # fills listWdg asynchronously -- confirm against its definition.
     thr = loader(fileListGen, self.listWdg)
     thr.start()
Esempio n. 8
0
# Load all event files of the latest episode into a dataframe and
# extract the action fields from each event's action dict.
episode = max(tubs)
data_dir = f"{data_dir}/{episode}"
print(episode)
#%%
files = os.listdir(data_dir)
files.sort()

event_files = [f for f in files if "event" in f]

#%%
L = []
for filename in event_files:
    # Bug fix: interpolate the actual file name into the path; the loop
    # variable was previously unused and a constant placeholder path was
    # formatted instead. Binding path before the try also guarantees it
    # exists when the warning below is logged.
    path = f"{data_dir}/{filename}"
    try:
        l = utils.loader(path)
        L.append(l)
    except Exception:
        # Narrowed from a bare except so KeyboardInterrupt/SystemExit
        # still propagate.
        logging.warning(f"Failed to load {path}")
#%%
dat = pd.DataFrame(L)
logging.info(f"Loaded {len(dat)} events into dataframe.")

# extract_actions
dat['action_dict'] = dat['action']
dat['active_option'] = dat.action_dict.map(
    lambda d: d.get("active_option", False) == "BackAndTurn")
dat['action'] = dat.action_dict.map(lambda a: a.get('action'))
dat = dat[(dat.action != "favicon.ico")]
dat = dat[(dat.action != "stop")]
from utils import Preprocessor
from utils import save_model
from utils import loader
from featurizer import Featurize
from sentiment_analyzer import NaiveBayes
from sklearn.model_selection import train_test_split
from evaluator import evaluate_accuracy
import pickle

# Load the raw corpus and hold out 20% for evaluation.
X, y = loader()
X_train, X_test, y_train, y_test = train_test_split(X,
                                                    y,
                                                    test_size=0.2,
                                                    random_state=42)
print(X_train)

# Persist the preprocessor; the context manager guarantees the handle is
# closed even if pickling fails (the explicit open/close pairs leaked the
# handle on error).
token = Preprocessor()
with open('preprocessor.obj', 'wb') as file1:
    pickle.dump(token, file1)

# Clean every training tweet, then fit and persist the featurizer.
train_preprocessed = [token.tweet_cleaner(i) for i in X_train]
f = Featurize()
train_features = f.vectorize_train(train_preprocessed)
with open('featurizer.obj', 'wb') as file2:
    pickle.dump(f, file2)

# Train the Naive Bayes classifier and save the fitted model.
model = NaiveBayes()
clf = model.train(train_features, labels=y_train)
save_model(clf)
Esempio n. 10
0
def fabricBase(apic, *args, **kwargs):
    '''
    Create APIC Fabric/Access policy MOs from an xlsx spreadsheet.

    Takes an existing APIC requests session, reads values from the
    workbook named by the parsed options (values.xlsx by default), and
    posts the corresponding managed objects. Which worksheets are
    processed is controlled by the parsed option flags:
    -n nodes, -i interface profiles, -p interface policy groups,
    -d interface selectors, -s single post from worksheet -w.

    param apic: requests session to use for HTTP Methods
    '''
    # NOTE(review): values() appears to be the project's option holder with
    # a parseArgs method -- confirm against its definition.
    options = values()
    options.parseArgs(args)
    keywords = kwargs
    # Check for filename arg. if not specified, generate base values/objects
    wb = options.filename
    # Open Values xlsx. If it doesn't existing raise a fault
    filePath = os.path.abspath(wb)

    # Check if workbook exists and load workbook with pandas
    if not os.path.exists(filePath):
        logging.critical('values.xlsx or {} not found!'.format(wb))
        sys.exit()
    # Load jinja2 templates
    env = utils.loader()
    if options.n == True:
        # Create one node profile per row of the 'nodes' worksheet.
        nodeDf = pd.read_excel(filePath, sheet_name='nodes')
        for row in nodeDf.iterrows():
            nodePro = row[1]['nodePName']
            intfPro = row[1]['intfPName']
            nodeId = row[1]['nodeId']
            nodePolGrp = row[1]['nodePolGrp']
            logging.info('creating nodeP {} with nodeId {}'.format(
                nodePro, nodeId))
            createNodeP(env, apic, nodePro, nodeId, nodePolGrp)
    if options.i == True:
        # Create interface profiles and associate them with node profiles.
        intfProDf = pd.read_excel(filePath, sheet_name='interfaceProfiles')
        for row in intfProDf.iterrows():
            nodePro = row[1]['nodePName']
            intfPro = row[1]['intfPName']
            logging.info('creating intfP {}'.format(intfPro))
            logging.info('associating to nodeP {}'.format(nodePro))
            createIntfP(env, apic, intfPro)
            createIntfRs(env, apic, nodePro, intfPro)
    if options.p == True:
        logging.info('Creating Interface Policy Groups')
        intfPolGrpDf = pd.read_excel(filePath,
                                     sheet_name='interfacePolicyGroups')
        # NOTE(review): .where(...) returns a new frame that is discarded
        # here -- the NaN replacement has no effect; confirm intent.
        intfPolGrpDf.where(intfPolGrpDf.notnull(), '')
        for row in intfPolGrpDf.iterrows():
            # lagT decides the MO template: node/link -> bundle group,
            # empty cell (read as 'nan') -> access port group.
            lagT = str(row[1]['lagT'])
            if lagT.lower() == 'node' or lagT.lower() == 'link':
                postMoUni(env, apic, 'infraAccBndlGrp.json', row[1])
                logging.info(
                    'VPC|Port-channel Interface Policy Group {} deployed'.
                    format(row[1]['name']))
            elif lagT == 'nan':
                postMoUni(env, apic, 'infraAccPortGrp.json', row[1])
                logging.info(
                    'Access Interface Policy Group {} deployed'.format(
                        row[1]['name']))
            else:
                logging.critical(
                    'Invalid lagT value in worksheet interfacePolicyGroups')
                logging.critical(
                    'node = vpc; link=discrete port-channel; <null> = access')
                logging.critical('lagT value specified was {}'.format(lagT))
                sys.exit()
            # Throttle posts to the APIC between rows.
            time.sleep(5)
    if options.d == True:
        logging.info('Deploying interface selectors from interfaces worksheet')
        intfDf = pd.read_excel(filePath, sheet_name='interfaces')
        for row in intfDf.iterrows():
            postMoUni(env, apic, 'infraHPortS.json', row[1])
            logging.info('Deployed {} to interface profile {}'.format(
                row[1]['hPortS'], row[1]['accPortProf']))
            time.sleep(3)

    # Single-post mode requires an explicit worksheet via -w.
    if options.s == True and options.w == '':
        logging.critical(
            'single post set, but worksheet option -w not specified')
        logging.critical('please specify a worksheet to load post data from')
        sys.exit(1)
    elif options.s == True and options.w != '':
        postMoUni(env, apic, kwargs['template'], kwargs)
Esempio n. 11
0
# Training entry point: parse args, seed RNGs, build data loaders, model,
# losses and optimizer. (The visible snippet ends mid-setup, before the
# tensorboard section is populated.)
if __name__ == '__main__':

    args = parser1.arg_parse()
    '''create directory to save trained model and other info'''
    if not os.path.exists(args.save_dir):
        os.makedirs(args.save_dir)
    ''' setup GPU '''
    torch.cuda.set_device(args.gpu)
    ''' setup random seed '''
    # Seed numpy and both CPU/GPU torch RNGs for reproducibility.
    np.random.seed(args.random_seed)
    torch.manual_seed(args.random_seed)
    torch.cuda.manual_seed(args.random_seed)
    ''' load dataset and prepare data loader '''
    print('===> prepare dataloader ...')
    train_loader, gallery_loader, query_loader = loader(args)
    ''' load model '''
    print('===> prepare model ...')
    model = Model()
    # NOTE(review): mgpus is not defined anywhere in this visible scope --
    # presumably a module-level multi-GPU flag; confirm before running.
    if mgpus:
        model = torch.nn.DataParallel(model, device_ids=list([0, 1])).cuda()
    elif not mgpus:
        model.cuda()  # load model to gpu
    ''' define loss '''
    criterion = nn.CrossEntropyLoss()
    t_loss = TripletLoss()
    ''' setup optimizer '''
    optimizer = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=args.weight_decay)
    ''' setup tensorboard '''
Esempio n. 12
0
def train_classifier(context,
                     train_data,
                     test_data,
                     batch_size,
                     batch_xs,
                     Model,
                     model_args,
                     init,
                     Criterion,
                     Optimizer,
                     optimizer_args,
                     importance_optimizer_args,
                     w_init='uniform',
                     calculate_cosines=False):
    """Train *Model* on *train_data*, logging rich diagnostics to TensorBoard.

    Resumes from ``context['model_state']`` when present, periodically
    checkpoints the full training state into ``context`` (serialized via
    torch.save with SIGINT/SIGTERM blocked), and at the batch counts in
    ``batch_xs`` logs eval metrics, layer weights and -- when
    ``calculate_cosines`` is set on a non-baseline model -- per-layer
    gradient cosine-similarity figures. Records final (and, for
    non-baseline models, cutout-baseline) accuracies in ``context``.
    Returns ``(context, exception_or_None)``.
    """
    model = Model(*model_args).to(get_device())
    optimizer = Optimizer(model.parameters(), *optimizer_args)
    # Separate optimizer for the layer-importance weights (non-baseline only).
    if not model.baseline:
        importance_optimizer = Optimizer(model.importance_parameters(),
                                         *importance_optimizer_args)
    criterion = Criterion(reduction='mean')
    # Subsample batch_xs at which the expensive figures are rendered.
    image_batch_xs = [
        x for i, x in enumerate(batch_xs)
        if i % (len(batch_xs) // IMAGE_SAVE_COUNT) == 0
    ]
    try:
        # batches_per_epoch = int(math.ceil(len(train_data) / batch_size))
        # Fresh run: initialize weights and seed the checkpoint context.
        if context['model_state'] is None:
            model.apply(init)
            model.init_layer_importances(w_init)
            current_x = 0
            context['current_x'] = 0
            context['model_state'] = None
            context['optimizer_state'] = None
            context['batch_size'] = batch_size
            context['train_data_len'] = len(train_data)
            context['code'] = SAVED_SOURCE
        else:
            # Resume: restore model/optimizer state and the batch counter.
            model.load_state_dict(context['model_state'])
            optimizer.load_state_dict(context['optimizer_state'])
            current_x = context['current_x']
        result_file = Path(
            context['dir_name']) / f"{context['run_name']}@state"
        summary_writer = SummaryWriter(
            f"{context['dir_name']}/{context['run_name']}")
        # summary_writer.add_graph(model, torch.randn(()))
        # NOTE(review): loader(...) is assumed to yield (X, y) mini-batches
        # -- confirm against its definition.
        train_loader = loader(train_data, batch_size)
        test_loader = loader(test_data, batch_size)
        model.train()
        model_saved = datetime.datetime.now()
        with tqdm(total=batch_xs[-1],
                  initial=current_x,
                  unit_scale=True,
                  dynamic_ncols=True) as pbar:
            while current_x <= batch_xs[-1]:
                for X, y in train_loader:
                    X = X.to(get_device())
                    y = y.to(get_device())

                    # Diagnostics
                    if current_x in batch_xs:
                        now = datetime.datetime.now()
                        pbar.set_description(
                            desc=
                            f'Last save {(now - model_saved).total_seconds():.0f}s ago',
                            refresh=False)
                        if calculate_cosines and not model.baseline and (
                                current_x in image_batch_xs
                                or current_x == batch_xs[-1]):
                            y_pred, layer_outputs = model(X)
                            loss, pen_loss = model.calculate_loss(
                                y_pred, y, criterion)
                            non_head_parameters = list(
                                model.non_head_parameters())
                            assert len(non_head_parameters) == len(
                                layer_outputs) == model.num_heads

                            # for every layer separately
                            for i in range(model.num_heads):
                                layer_gradients = []
                                # calculate gradients for every head
                                for j in range(model.num_heads):
                                    # Heads below the current layer cannot
                                    # influence it: store zero gradients.
                                    if j < i:
                                        layer_gradients.append(
                                            torch.cat([
                                                torch.zeros_like(p).view(-1)
                                                for p in non_head_parameters[i]
                                            ]))
                                        continue
                                    # calculate same output again, but with detach
                                    layer_outputs_detached = [
                                        layer_output.detach()
                                        if k != j else layer_output
                                        for k, layer_output in enumerate(
                                            layer_outputs)
                                    ]
                                    # and its loss
                                    layer_loss, _ = model.calculate_loss(
                                        layer_outputs_detached, y, criterion)
                                    assert layer_loss.allclose(loss)
                                    layer_gradient = torch.autograd.grad(
                                        layer_loss,
                                        non_head_parameters[i],
                                        retain_graph=True)
                                    layer_gradients.append(
                                        torch.cat([
                                            gradient.view(-1)
                                            for gradient in layer_gradient
                                        ]))
                                # calculate stats
                                with torch.no_grad():
                                    # calculate norms
                                    # norms = {f'{j}': torch.norm(layer_gradient.view(-1), p=2).item() for
                                    #          (j, layer_gradient) in enumerate(layer_gradients)}
                                    # summary_writer.add_scalars(f'Layer {i} norms', norms, global_step=current_x)
                                    # calculate cosine similarities and put them into a matrix
                                    cs_matrix = torch.zeros(
                                        model.num_heads,
                                        model.num_heads,
                                        device=get_device())
                                    for j in range(i, len(layer_gradients)):
                                        for k in range(j,
                                                       len(layer_gradients)):
                                            cs_matrix[k][
                                                j] = torch.nn.functional.cosine_similarity(
                                                    layer_gradients[j].view(
                                                        -1),
                                                    layer_gradients[k].view(
                                                        -1),
                                                    dim=0)
                                            # Cosine similarity is symmetric.
                                            cs_matrix[j][k] = cs_matrix[k][j]
                                    fig = matrix_to_figure(cs_matrix)
                                    summary_writer.add_figure(
                                        f'Layer {i} cosine similarities',
                                        fig,
                                        global_step=current_x)
                                    close(fig)
                                del layer_gradients, layer_gradient

                            # Cosines between each head's gradient and the
                            # true (combined) gradient, w.r.t. base layer 1.
                            true_grad_cs = torch.zeros(model.num_heads,
                                                       device=get_device())
                            for i in range(model.num_heads):
                                target_params = non_head_parameters[0]
                                layer_outputs_detached = [
                                    layer_output.detach()
                                    if k != i else layer_output for k,
                                    layer_output in enumerate(layer_outputs)
                                ]
                                layer_loss, _ = model.calculate_loss(
                                    layer_outputs_detached, y, criterion)
                                layer_gradient = torch.autograd.grad(
                                    layer_loss,
                                    target_params,
                                    retain_graph=True)
                                layer_gradient = torch.cat([
                                    gradient.view(-1)
                                    for gradient in layer_gradient
                                ])
                                true_grad = torch.autograd.grad(
                                    loss, target_params, retain_graph=True)
                                true_grad = torch.cat([
                                    gradient.view(-1) for gradient in true_grad
                                ])
                                true_grad_cs[
                                    i] = torch.nn.functional.cosine_similarity(
                                        layer_gradient.view(-1),
                                        true_grad.view(-1),
                                        dim=0)
                                del layer_gradient, true_grad
                            fig = cs_vec_to_figure(true_grad_cs)
                            summary_writer.add_figure(
                                f'True cosine similarities (base layer 1)',
                                fig,
                                global_step=current_x)
                            close(fig)

                            # Same comparison, but over *all* non-head
                            # parameters (unused grads padded with zeros).
                            true_grad_cs = torch.zeros(model.num_heads,
                                                       device=get_device())
                            for i in range(model.num_heads):
                                target_params = list(
                                    chain.from_iterable(non_head_parameters))
                                layer_outputs_detached = [
                                    layer_output.detach()
                                    if k != i else layer_output for k,
                                    layer_output in enumerate(layer_outputs)
                                ]
                                layer_loss, _ = model.calculate_loss(
                                    layer_outputs_detached, y, criterion)
                                layer_gradient = torch.autograd.grad(
                                    layer_loss,
                                    target_params,
                                    retain_graph=True,
                                    allow_unused=True)
                                to_cat = []
                                for gradient, param in zip(
                                        layer_gradient, target_params):
                                    if gradient is not None:
                                        to_cat.append(gradient.view(-1))
                                    else:
                                        to_cat.append(
                                            torch.zeros_like(param).view(-1))
                                layer_gradient = torch.cat(to_cat)
                                true_grad = torch.autograd.grad(
                                    loss, target_params, retain_graph=True)
                                to_cat = []
                                for gradient, param in zip(
                                        true_grad, target_params):
                                    if gradient is not None:
                                        to_cat.append(gradient.view(-1))
                                    else:
                                        to_cat.append(
                                            torch.zeros_like(param).view(-1))
                                true_grad = torch.cat(to_cat)
                                true_grad_cs[
                                    i] = torch.nn.functional.cosine_similarity(
                                        layer_gradient.view(-1),
                                        true_grad.view(-1),
                                        dim=0)
                                del layer_gradient, true_grad
                            fig = cs_vec_to_figure(true_grad_cs)
                            summary_writer.add_figure(
                                f'True cosine similarities',
                                fig,
                                global_step=current_x)
                            close(fig)

                        # Cheap eval on 10 batches of each split.
                        test_loss, test_acc = test_classification(model,
                                                                  test_loader,
                                                                  criterion,
                                                                  batches=10)
                        train_loss, train_acc = test_classification(
                            model, train_loader, criterion, batches=10)
                        summary_writer.add_scalar('Eval/Test loss',
                                                  test_loss,
                                                  global_step=current_x)
                        summary_writer.add_scalar('Eval/Test accuracy',
                                                  test_acc,
                                                  global_step=current_x)
                        summary_writer.add_scalar('Eval/Train loss',
                                                  train_loss,
                                                  global_step=current_x)
                        summary_writer.add_scalar('Eval/Train accuracy',
                                                  train_acc,
                                                  global_step=current_x)
                        # save weights
                        if not model.baseline:
                            y_pred, layer_outputs = model(X)
                            loss, pen_loss = model.calculate_loss(
                                y_pred, y, criterion)
                            weights = {
                                f'{i}': value.item()
                                for i, value in enumerate(model.ws)
                            }
                            summary_writer.add_scalars('Weights',
                                                       weights,
                                                       global_step=current_x)
                            if current_x in image_batch_xs or current_x == batch_xs[
                                    -1]:
                                fig, ax = plt.subplots(figsize=(16, 9))
                                indices = np.arange(1, model.ws.size(0) + 1)
                                values = torch.softmax(model.ws.detach().cpu(),
                                                       dim=0).numpy()
                                ax.bar(indices, values)
                                summary_writer.add_figure(
                                    'Weights', fig, global_step=current_x)
                                close(fig)

                                layer_weights_gradient = torch.autograd.grad(
                                    loss, model.ws)
                                fig, ax = plt.subplots(figsize=(16, 9))
                                indices = np.arange(1, model.ws.size(0) + 1)
                                values = layer_weights_gradient[0].cpu().numpy(
                                )
                                ax.bar(indices, values)
                                summary_writer.add_figure(
                                    'Weights grad', fig, global_step=current_x)
                                close(fig)

                                layer_losses = [
                                    criterion(layer_output, y).item()
                                    for layer_output in layer_outputs
                                ]
                                fig, ax = plt.subplots(figsize=(16, 9))
                                indices = np.arange(1, model.ws.size(0) + 1)
                                values = layer_losses
                                ax.bar(indices, values)
                                summary_writer.add_figure(
                                    'Layer losses', fig, global_step=current_x)
                                close(fig)
                        # save model conditionally
                        if (now - model_saved
                            ).total_seconds() > 60 * MODEL_SAVE_MINUTES:
                            # save training state
                            context['current_x'] = current_x
                            context['model_state'] = model.state_dict()
                            context['optimizer_state'] = optimizer.state_dict()
                            # Block SIGINT/SIGTERM while serializing so the
                            # checkpoint file is never left truncated.
                            signal.pthread_sigmask(
                                signal.SIG_BLOCK,
                                {signal.SIGINT, signal.SIGTERM})
                            with open(result_file, 'wb') as f:
                                torch.save(context, f)
                            signal.pthread_sigmask(
                                signal.SIG_UNBLOCK,
                                {signal.SIGINT, signal.SIGTERM})
                            model_saved = datetime.datetime.now()

                    # Training step
                    y_pred, layer_outputs = model(X)

                    loss, pen_loss = model.calculate_loss(y_pred, y, criterion)
                    optimizer.zero_grad()
                    if not model.baseline:
                        importance_optimizer.zero_grad()
                    # Keep the graph alive at diagnostic steps so the
                    # cosine/figure code above can re-differentiate it.
                    loss.backward(
                        retain_graph=True if current_x in batch_xs else False)
                    optimizer.step()
                    if not model.baseline:
                        importance_optimizer.step()

                    summary_writer.add_scalar(f'Train/Loss',
                                              loss.item(),
                                              global_step=current_x)
                    summary_writer.add_scalar(f'Train/Penalization loss',
                                              pen_loss.item(),
                                              global_step=current_x)
                    summary_writer.add_scalar(
                        f'Train/Sum of probabilities (exp)',
                        y_pred.exp().mean().item(),
                        global_step=current_x)

                    pbar.update()
                    if current_x >= batch_xs[-1]:
                        current_x += 1
                        break
                    else:
                        current_x += 1

        # Final full-dataset evaluation (batches=0 -- presumably "all").
        if 'final_acc' not in context:
            context['current_x'] = current_x
            context['model_state'] = model.state_dict()
            context['optimizer_state'] = optimizer.state_dict()
            test_loss, test_acc = test_classification(model,
                                                      test_loader,
                                                      criterion,
                                                      batches=0)
            context['final_acc'] = test_acc
            context['final_loss'] = test_loss
            print(f'Final loss: {test_loss}')
            train_loss, train_acc = test_classification(model,
                                                        train_loader,
                                                        criterion,
                                                        batches=0)
            context['final_train_acc'] = train_acc
            context['final_train_loss'] = train_loss
            print(f'Final train loss: {train_loss}')
            # save model to secondary storage
            signal.pthread_sigmask(signal.SIG_BLOCK,
                                   {signal.SIGINT, signal.SIGTERM})
            with open(result_file, 'wb') as f:
                torch.save(context, f)
            signal.pthread_sigmask(signal.SIG_UNBLOCK,
                                   {signal.SIGINT, signal.SIGTERM})
        # Evaluate the cutout baseline derived from the trained model.
        if not model.baseline:
            if 'cutout_final_acc' not in context:
                cut_model, chosen_layer, single_choice = cutout_baseline(model)
                test_loss, test_acc = test_classification(cut_model,
                                                          test_loader,
                                                          criterion,
                                                          batches=0)
                train_loss, train_acc = test_classification(cut_model,
                                                            train_loader,
                                                            criterion,
                                                            batches=0)
                context['chosen_layer'] = chosen_layer
                context['single_choice'] = single_choice
                context['cutout_final_acc'] = test_acc
                context['cutout_final_loss'] = test_loss
                context['cutout_final_train_acc'] = train_acc
                context['cutout_final_train_loss'] = train_loss
                # save model to secondary storage
                signal.pthread_sigmask(signal.SIG_BLOCK,
                                       {signal.SIGINT, signal.SIGTERM})
                with open(result_file, 'wb') as f:
                    torch.save(context, f)
                signal.pthread_sigmask(signal.SIG_UNBLOCK,
                                       {signal.SIGINT, signal.SIGTERM})
            print(f"Final ACC (cutout model): {context['cutout_final_acc']}")
            print(
                f"Final train ACC (cutout model): {context['cutout_final_train_acc']}"
            )
            print(f"Chosen layer (cutout model): {context['chosen_layer']}")
            print(f"Single choice (cutout model): {context['single_choice']}")
        print(f"Final ACC: {context['final_acc']}")
        print(f"Final train ACC: {context['final_train_acc']}")

    except KeyboardInterrupt as e:
        # Hand the partially-updated context back to the caller on Ctrl-C.
        return context, e
    except Exception as e:
        # Record the failure in the context instead of crashing the sweep.
        context['exception'] = e
        context['traceback'] = traceback.format_exc()
    return context, None
Esempio n. 13
0
def main():
    """Train the convolutional model, evaluating on the validation set every
    ``c.test_per_epoches`` gradient steps and checkpointing both the most
    recent weights and the best-validation-accuracy weights."""
    num_epoches = 200
    # 1. Load data and wrap each split in a batch generator.
    train_dataset = load_dataset('train')
    valid_dataset = load_dataset('valid')
    loader_train = loader(train_dataset, c.batch_size)
    loader_valid = loader(valid_dataset, c.batch_size)
    # Integer division: this count feeds range() below, which requires an
    # int (plain `/` yields a float on Python 3 and breaks range()).
    test_steps = len(valid_dataset['label']) // c.batch_size
    print(len(train_dataset['label']))
    logging.info("training %d valid %d" %
                 (len(train_dataset['label']), len(valid_dataset['label'])))
    # 2. Build and compile the model.
    input_shape = (256, 256, 3)
    model = convolutional_model(input_shape, batch_size=c.batch_size)

    opt = optimizers.Adam(lr=0.0001)
    model.compile(optimizer=opt,
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    grad_steps = 0
    last_checkpoint = get_last_checkpoint_if_any(c.checkpoint_folder)

    # Same integer-division fix as test_steps above.
    steps_per_epoch = len(train_dataset['label']) // c.batch_size

    best_acc = 0
    print(last_checkpoint)
    if last_checkpoint is not None:
        logging.info('Found checkpoint [{}]. Resume from here...'.format(
            last_checkpoint))
        print('loading checkpoint %s' % (last_checkpoint))
        model.load_weights(last_checkpoint)
        # Checkpoints are named ..._<grad_steps>_<loss>.h5, so the step
        # counter is the second-to-last underscore-separated field.
        grad_steps = int(last_checkpoint.split('_')[-2])
        logging.info('[DONE]')
    for i in range(num_epoches):
        print("Epoch %d" % (i))
        for j in range(steps_per_epoch):
            orig_time = time()
            # NOTE(review): `.next()` implies a Python-2-style iterator or a
            # custom loader class — confirm it also defines __next__ if this
            # runs on Python 3.
            x_train, y_train = loader_train.next()
            [loss, acc] = model.train_on_batch(x_train,
                                               y_train)  # return [loss, acc]
            logging.info(
                'Train Steps:{0}, Time:{1:.2f}s, Loss={2}, Accuracy={3}'.
                format(grad_steps,
                       time() - orig_time, loss, acc))
            if (grad_steps % 100 == 0):
                print(
                    "Training epoch   [%d] steps  [%d]    acc [%f]      loss [%f]"
                    % (i, grad_steps, acc, loss))
            with open(c.checkpoint_folder + "/train_loss_acc.txt", "a") as f:
                f.write("{0},{1},{2}\n".format(grad_steps, loss, acc))

            if grad_steps % c.test_per_epoches == 0:
                # Evaluate over the whole validation set and average.
                losses = []
                accs = []
                for ss in range(test_steps):
                    x_valid, y_valid = loader_valid.next()
                    [loss, acc] = model.test_on_batch(x_valid, y_valid)
                    losses.append(loss)
                    accs.append(acc)
                loss = np.mean(np.array(losses))
                acc = np.mean(np.array(accs))
                print("Test at epoch    ", i, "steps    ", grad_steps,
                      "avg loss   ", loss, "avg acc   ", acc)
                logging.info(
                    'Test the Data ---------- Steps:{0}, Loss={1}, Accuracy={2}, '
                    .format(grad_steps, loss, acc))
                with open(c.checkpoint_folder + "/test_loss_acc.txt",
                          "a") as f:
                    f.write("{0},{1},{2}\n".format(grad_steps, loss, acc))
                # Periodic checkpoint (independent of the log file above —
                # de-nested from the `with` block for clarity, no behavior
                # change since it never used the file handle).
                if grad_steps % c.save_per_epoches == 0:
                    create_dir_and_delete_content(c.checkpoint_folder)
                    model.save_weights('{0}/model_{1}_{2:.5f}.h5'.format(
                        c.checkpoint_folder, grad_steps, loss))
                # Keep a separate copy of the best-accuracy weights.
                if acc > best_acc:
                    best_acc = acc
                    create_dir_and_delete_content(c.best_checkpoint_dir)
                    model.save_weights(
                        c.best_checkpoint_dir +
                        '/best_model{0}_{1:.5f}.h5'.format(grad_steps, acc))
            grad_steps += 1
Esempio n. 14
0
 def hSubfolders(self, action):
     # Rebuild the list widget from self.folder; whether subfolders are
     # included follows the (checkable) action's current state.
     self.listWdg.clear()
     fileListGen = self.doGen(self.folder, withsub=action.isChecked())
     # launch loader instance
     thr = loader(fileListGen, self.listWdg)
     thr.start()
     # NOTE(review): no reference to `thr` is retained after this method
     # returns; if `loader` is a QThread it may be garbage-collected while
     # still running — confirm who owns the thread object.
Esempio n. 15
0
def train(args):
    """Train the InitNet/UNet pair with per-net optimizers and schedulers,
    resuming from saved checkpoints when both exist, and write each model's
    summary report to disk when training ends.

    Args:
        args: namespace providing at least ``epochs``, ``init_state_dict``
            and ``deep_state_dict`` (plus whatever the model and loader
            constructors consume).
    """
    # Local imports: only needed for the summary-capture fix below.
    import contextlib
    import io

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
    init_net = nn.DataParallel(models.InitNet(args)).to(device)
    deep_net = nn.DataParallel(models.UNet(args)).to(device)

    print("Data loading.")
    dataset = utils.loader(args)
    print("Data loaded.")

    criterion = nn.L1Loss().to(device)
    optimizer_init = optim.Adam(init_net.parameters())
    optimizer_deep = optim.Adam(deep_net.parameters())
    scheduler_init = optim.lr_scheduler.MultiStepLR(optimizer_init, milestones=[50, 80], gamma=0.1)
    scheduler_deep = optim.lr_scheduler.MultiStepLR(optimizer_deep, milestones=[50, 80], gamma=0.1)

    print("Train start.")
    time_start = time.time()

    # Resume only when BOTH checkpoints exist so the two nets stay in sync.
    if os.path.exists(args.init_state_dict) and os.path.exists(args.deep_state_dict):
        if torch.cuda.is_available():
            checkpoint_init = torch.load(args.init_state_dict)
            checkpoint_deep = torch.load(args.deep_state_dict)
        else:
            checkpoint_init = torch.load(args.init_state_dict, map_location="cpu")
            checkpoint_deep = torch.load(args.deep_state_dict, map_location="cpu")

        init_net.load_state_dict(checkpoint_init["model"])
        optimizer_init.load_state_dict(checkpoint_init["optimizer"])

        deep_net.load_state_dict(checkpoint_deep["model"])
        optimizer_deep.load_state_dict(checkpoint_deep["optimizer"])

        start_epoch = checkpoint_deep["epoch"]
        print("Success loading epoch {}".format(start_epoch))
    else:
        start_epoch = 0
        print("No saved model, start epoch = 0.")

    for epoch in range(start_epoch, args.epochs):
        for idx, item in enumerate(dataset):
            x, _ = item
            x = x.to(device)

            optimizer_init.zero_grad()
            optimizer_deep.zero_grad()

            # Two-stage reconstruction: deep_net learns a residual on top of
            # the initial reconstruction.
            init_x = init_net(x)
            init_x = utils.reshape(init_x, args)
            deep_x = deep_net(init_x)

            loss_init = criterion(x, init_x)
            loss_deep = criterion(x, init_x + deep_x)

            # retain_graph: loss_deep's backward reuses init_net's graph.
            loss_init.backward(retain_graph=True)
            loss_deep.backward()

            optimizer_init.step()
            optimizer_deep.step()

            if (idx + 1) % 20 == 0:
                # Hoisted into the branch: elapsed time is only needed when
                # this log line actually fires.
                use_time = time.time() - time_start
                print("=> epoch: {}, batch: {}, Loss1: {:.4f}, Loss2: {:.4f}, lr1: {}, lr2: {}, used time: {:.4f}"
                      .format(epoch + 1, idx + 1, loss_init.item(), loss_deep.item(),
                              optimizer_init.param_groups[0]['lr'], optimizer_deep.param_groups[0]['lr'], use_time))

        scheduler_init.step()
        scheduler_deep.step()
        # Checkpoint every epoch; epoch is stored 1-based so a resume
        # continues with the next unseen epoch.
        state_init = {"model": init_net.state_dict(), "optimizer": optimizer_init.state_dict()}
        state_deep = {"model": deep_net.state_dict(), "optimizer": optimizer_deep.state_dict(), "epoch": epoch + 1}
        torch.save(state_init, args.init_state_dict)
        torch.save(state_deep, args.deep_state_dict)
        print("Check point of epoch {} saved.".format(epoch + 1))

    print("Train end.")

    def _summary_text(net):
        # Bug fix: torchsummary.summary() prints its report to stdout and
        # returns None, so the original `f.write(torchsummary.summary(...))`
        # raised TypeError. Capture the printed report instead.
        buf = io.StringIO()
        with contextlib.redirect_stdout(buf):
            torchsummary.summary(net, (1, 32, 32))
        return buf.getvalue()

    init_summary = _summary_text(init_net)
    deep_summary = _summary_text(deep_net)
    # Preserve the original console output of both summaries.
    print(init_summary)
    print(deep_summary)
    with open("./trained_models/init_net.txt", "w") as f1:
        f1.write(init_summary)
    with open("./trained_models/deep_net.txt", "w") as f2:
        f2.write(deep_summary)
Esempio n. 16
0
# Parse command-line options for this training script.
args = parser.parse_args()

best_acc = 0

# Instantiate the network (the class name `net` is shadowed by the instance)
# and parallelize across all visible GPUs.
net = net().to('cuda')
net = torch.nn.DataParallel(net)
cudnn.benchmark = True  # let cuDNN auto-tune conv algorithms

if args.resume:
    print('==> Resuming from checkpoint..')
    assert os.path.isdir('checkpoint'), 'Error: no checkpoint directory found!'
    checkpoint = torch.load('./checkpoint/{}.t7'.format(args.model_name))
    net.load_state_dict(checkpoint['net'])
    # NOTE(review): the checkpoint restores an F2 score into `best_F2`, but
    # `best_acc` above is never updated on resume — confirm which metric is
    # meant to gate best-model checkpointing.
    best_F2 = checkpoint['F2']

# Training uses shuffling and drops the last partial batch; evaluation
# keeps every sample.
train_loader = Data.DataLoader(loader('datafile/train.txt'),
                               batch_size=args.batch_size,
                               shuffle=True,
                               num_workers=8,
                               drop_last=True)
test_loader = Data.DataLoader(loader('datafile/val.txt', test=True),
                              batch_size=args.batch_size,
                              num_workers=8)

criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(net.parameters(),
                      lr=args.lr,
                      momentum=0.9,
                      weight_decay=5e-4)

for epoch in range(args.num_epoch):
Esempio n. 17
0
# Number of minibatches per split. NOTE(review): np.ceil returns a float,
# so any later range() over these counts needs an int() cast — confirm
# downstream usage.
num_batch_train = np.ceil(num_data_train / batch_size)
num_batch_val = np.ceil(num_data_val / batch_size)

## Set up auxiliary helper functions.
# Convert an NCHW torch tensor to an NHWC numpy array.
# Bug fix: `.numpy` must be CALLED — the original accessed the bound method
# object and then crashed with AttributeError on `.transpose`.
fn_tonumpy = lambda x: x.to('cpu').detach().numpy().transpose(0, 2, 3, 1)
# Undo per-channel normalization: x_norm * std + mean.
fn_denorm = lambda x, mean, std: (x * std) + mean
# Threshold sigmoid outputs into a hard {0, 1} mask.
fn_class = lambda x: 1.0 * (x > 0.5)

## Set up SummaryWriter instances for TensorBoard logging.
writer_train = SummaryWriter(log_dir=os.path.join(log_dir, 'train'))
writer_val = SummaryWriter(log_dir=os.path.join(log_dir, 'val'))

## Train the network.
st_epoch = 0
# Restore network/optimizer state and the epoch to resume from — presumably
# returns the inputs unchanged with st_epoch=0 when no checkpoint exists;
# confirm against loader()'s definition.
net, optim, st_epoch = loader(ckpt_dir=ckpt_dir, net=net, optim=optim)

for epoch in range(st_epoch + 1, num_epoch + 1):
    net.train()
    loss_arr = []

    for batch, data in enumerate(loader_train, 1):
        #forward pass
        label = data['label'].to(device)
        image = data['image'].to(device)

        output = net(image)

        #backward pass
        optim.zero_grad()
Esempio n. 18
0
import torch.nn.functional as F

from model import Model
from dataset import Dataset
from utils import loader,details
from ctc import Predictor 

#import torch.multiprocessing
#torch.multiprocessing.set_sharing_strategy('file_system')
 
# Training configuration constants.
EPOCHS = 12
TIME_ZONE = pytz.timezone("America/New_York")
# Prefer GPU when available.
DEVICE = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

# load data
train_loader = loader("data/wsj0_train.npy","data/wsj0_train_merged_labels.npy")
#train_loader = loader("data/wsj0_dev.npy","data/wsj0_dev_merged_labels.npy")
valid_loader = loader("data/wsj0_dev.npy","data/wsj0_dev_merged_labels.npy")

_begin_time = time.time()
# prepare model — resumes from previously saved weights in model.pt, so the
# file must already exist.
model = Model(device = DEVICE)
model.load_state_dict(torch.load("model.pt"))

criterion = nn.CTCLoss()
opt = optim.Adam(model.parameters(),lr=0.0001)
predictor = Predictor()

# Report model-preparation wall time, then restart the timer for the
# next phase.
_end_time = time.time()
print("prepare model: " + str(_end_time - _begin_time))
_begin_time = time.time()