Example no. 1
def train(config_file):
    # 1, load configuration parameters
    config = parse_config(config_file)
    config_common = config['common']
    config_data = config['data']
    config_net = config['network']
    config_train = config['training']

    random.seed(config_train.get('random_seed', 1))
    assert (config_data['with_ground_truth'])

    class_num = config_net['class_num']

    # 2, load data
    data_names = get_patient_names(config_data["data_names"])
    split_ratio = int(config_train["train_val_ratio"] * len(data_names))
    random.Random(42).shuffle(data_names)
    config_data["train_names"] = data_names[:split_ratio]
    config_data["val_names"] = data_names[split_ratio:]
    dataset_tr = DataLoader("train", config_data)
    dataset_tr.load_data()
    train_loader = torch.utils.data.DataLoader(
        dataset_tr,
        batch_size=config_train['train_batch_size'],
        shuffle=True,
        num_workers=4,
        pin_memory=True)

    dataset_val = DataLoader("validate", config_data)
    dataset_val.load_data()
    val_loader = torch.utils.data.DataLoader(
        dataset_val,
        batch_size=config_train['val_batch_size'],
        shuffle=False,
        num_workers=4,
        pin_memory=True)

    # 3, load model
    # load a pretrained checkpoint if a path is given, otherwise build a fresh ARCNet
    empty_model = ARCNet(class_num,
                         vae_enable=config_train['vae_enable'],
                         config=config_data)
    if config_train['model_pre_trained']:
        arcnet_model = torch.load(config_train['model_pre_trained'])
    else:
        arcnet_model = ARCNet(class_num,
                              vae_enable=config_train['vae_enable'],
                              config=config_data)

    # 4, start to train
    solver = Solver(
        arcnet_model,
        exp_name=config_train['exp_name'],
        device=config_common['device'],
        class_num=config_net['class_num'],
        optim_args={
            "lr": config_train['learning_rate'],
            "betas": config_train['optim_betas'],
            "eps": config_train['optim_eps'],
            "weight_decay": config_train['optim_weight_decay']
        },
        loss_args={
            "vae_loss": config_train['vae_enable'],
            "loss_k1_weight": config_train['loss_k1_weight'],
            "loss_k2_weight": config_train['loss_k2_weight']
        },
        model_name=config_common['model_name'],
        labels=config_data['labels'],
        log_nth=config_train['log_nth'],
        num_epochs=config_train['num_epochs'],
        lr_scheduler_step_size=config_train['lr_scheduler_step_size'],
        lr_scheduler_gamma=config_train['lr_scheduler_gamma'],
        use_last_checkpoint=config_train['use_last_checkpoint'],
        log_dir=config_common['log_dir'],
        exp_dir=config_common['exp_dir'])

    solver.train(train_loader, val_loader)
    if not os.path.exists(config_common['save_model_dir']):
        os.makedirs(config_common['save_model_dir'])
    final_model_path = os.path.join(config_common['save_model_dir'],
                                    config_train['final_model_file'])
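    # note: an untrained model is swapped in here on the assumption that
    # save_best_model loads the best checkpoint weights into it before saving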
    solver.model = empty_model
    solver.save_best_model(final_model_path)
    print("final model saved @ " + str(final_model_path))
Example no. 2
def main():

    ip_file = None
    myargs = getopts(argv)
    if "-f" in myargs:
        ip_file = myargs["-f"]
    else:
        print("Usage: python mission_assign.py -f <input file>")
        exit()

    op_file = ip_file.split(".")[0] + "_output.txt"

    # create parser object
    parser = FileParser(filepath=ip_file)

    # flatten the uas indices into instances, taking into account uas count per uas
    parser.create_uas_index_to_instance_map()
    # setup mission map where each entry is a mission id - compatible uas list map.
    missions = parser.get_domain_for_missions()
    # setup pilot map where each entry is a pilot name - compatible uas list map.
    pilots, pilots_pref = parser.get_domain_for_pilots()
    uas_max = parser.get_uas_instance_count()
    uas_names = parser.get_uas_names()

    # create the solver object
    solver = Solver(pilot_map=pilots,
                    mission_map=missions,
                    uas_max=uas_max,
                    pilot_prefs={})

    # build the domain variables
    solver.build_variables()
    # build the constraints
    solver.build_constraints()

    # start the timer for 90 seconds
    t = threading.Timer(90.0, timeout)
    t.start()
    # solve the 'problem'
    solution = solver.get_solution()
    # solution was found, timer can be stopped
    t.cancel()

    if solution:

        # pretty print logic follows
        print "Solution found! Writing to file..." + op_file
        pretty_map = {}
        for key, value in solution.items():
            if isinstance(key, int):
                if value in pretty_map:
                    pretty_map[value]["mission"].append(key)
                else:
                    pretty_map[value] = {
                        "mission": [key],
                        "pilot": None,
                        "uas":
                        uas_names[parser.get_uas_index_from_instance(value)],
                        "fav": None
                    }

        sorted_pretty_list = [None] * len(missions)

        for key, value in solution.items():
            if not isinstance(key, int):
                pretty_map[value]["pilot"] = key
                pretty_map[value][
                    "fav"] = "Yes" if value in pilots_pref[key] else "No"

        for uas, value in pretty_map.items():
            # iterate this UAS's missions without shadowing the outer missions map
            for mission in value["mission"]:
                sorted_pretty_list[mission] = str("M" + str(mission + 1) +
                                                  " " + value["pilot"] + " " +
                                                  value["uas"] + " " +
                                                  value["fav"])

        with open(op_file, 'w') as f:
            for assignment in sorted_pretty_list:
                f.write(assignment + "\n")
                print(assignment)
    else:
        print "No solution found!"
        with open(op_file, 'w') as f:
            f.write("No solution found!" + "\n")
Example no. 3
def main():
    solver = Solver(args,
                    source=args.source,
                    target=args.target,
                    learning_rate=args.lr,
                    batch_size=args.batch_size,
                    optimizer=args.optimizer,
                    num_k=args.num_k,
                    all_use=args.all_use,
                    checkpoint_dir=args.checkpoint_dir,
                    save_epoch=args.save_epoch)
    record_num = 1
    if args.source == 'usps' or args.target == 'usps':

        record_train = 'record/%s_%s_k_%s_alluse_%s_onestep_%s_%s.txt' % (
            args.source, args.target, args.num_k, args.all_use, args.one_step,
            record_num)
        record_test = 'record/%s_%s_k_%s_alluse_%s_onestep_%s_%s_test.txt' % (
            args.source, args.target, args.num_k, args.all_use, args.one_step,
            record_num)
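        # bump the run index until an unused record filename is found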
        while os.path.exists(record_train):
            record_num += 1
            record_train = 'record/%s_%s_k_%s_alluse_%s_onestep_%s_%s.txt' % (
                args.source, args.target, args.num_k, args.all_use,
                args.one_step, record_num)
            record_test = 'record/%s_%s_k_%s_alluse_%s_onestep_%s_%s_test.txt' % (
                args.source, args.target, args.num_k, args.all_use,
                args.one_step, record_num)
    else:
        record_train = 'record/%s_%s_k_%s_onestep_%s_%s.txt' % (
            args.source, args.target, args.num_k, args.one_step, record_num)
        record_test = 'record/%s_%s_k_%s_onestep_%s_%s_test.txt' % (
            args.source, args.target, args.num_k, args.one_step, record_num)
        while os.path.exists(record_train):
            record_num += 1
            record_train = 'record/%s_%s_k_%s_onestep_%s_%s.txt' % (
                args.source, args.target, args.num_k, args.one_step,
                record_num)
            record_test = 'record/%s_%s_k_%s_onestep_%s_%s_test.txt' % (
                args.source, args.target, args.num_k, args.one_step,
                record_num)

    if not os.path.exists(args.checkpoint_dir):
        os.mkdir(args.checkpoint_dir)
    if not os.path.exists('record'):
        os.mkdir('record')
    if args.eval_only:
        solver.test(0)
    else:
        count = 0
        A_st_n = 0.5
        J_w_n = 0.5
        max_Jw = 1
        min_Jw = 1
        Ast_max = 0
        Ast_min = 0
        acc_m = 0
        for t in range(args.max_epoch):
            A_st_norm = A_st_n
            J_w_norm = J_w_n
            A_st_max = Ast_max
            A_st_min = Ast_min
            max_J_w = max_Jw
            min_J_w = min_Jw
            if not args.one_step:
                Ast_min, Ast_max, min_Jw, max_Jw, A_st_n, J_w_n, num = solver.train(
                    A_st_min,
                    A_st_max,
                    min_J_w,
                    max_J_w,
                    A_st_norm,
                    J_w_norm,
                    t,
                    record_file=record_train)
                if t == 0:
                    min_Jw = max_Jw
            else:
                num = solver.train_onestep(t, record_file=record_train)
            count += num

            acc_max = acc_m
            if t % 1 == 0:  # note: t % 1 == 0 is always true, so evaluation runs every epoch
                acc_m = solver.test(acc_max,
                                    t,
                                    record_file=record_test,
                                    save_model=args.save_model)
            if count >= 20000:
                break
Example no. 4
    parser.add_argument(
        '-data_dir',
        '-d',
        default=
        '/work/b07u1234/b06502162/HW2-1/github-HW2-1/vctk/trimmed_vctk_spectrograms/sr_24000_mel_norm/'
    )
    parser.add_argument('-train_set', default='train_128')
    parser.add_argument('-train_index_file', default='train_samples_128.json')
    parser.add_argument('-logdir', default='log/')
    parser.add_argument('--load_model', action='store_true')
    parser.add_argument('--load_opt', action='store_true')
    parser.add_argument('-store_model_path',
                        default='/work/b07u1234/b06502162/HW2-1/github-HW2-1')
    parser.add_argument('-load_model_path',
                        default='/storage/model/adaptive_vc/model')
    parser.add_argument('-summary_steps', default=100, type=int)
    parser.add_argument('-save_steps', default=5000, type=int)
    parser.add_argument('-tag', '-t', default='init')
    parser.add_argument('-iters', default=100000, type=int)

    args = parser.parse_args()

    # load config file
    with open(args.config) as f:
        config = yaml.safe_load(f)

    solver = Solver(config=config, args=args)

    if args.iters > 0:
        solver.train(n_iterations=args.iters)
Example no. 5
                        type=int,
                        default=27596,
                        help='comment vocab size')
    parser.add_argument('-comment_max_len',
                        type=int,
                        default=100,
                        help='comment max length')
    parser.add_argument('-relative_pos',
                        type=bool,
                        default=True,
                        help='use relative position')
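    # note: argparse's type=bool treats any non-empty string (even "False") as True;
    # a custom str2bool converter is the usual workaround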
    parser.add_argument('-k', type=int, default=5, help='relative window size')
    parser.add_argument('-num_layers', type=int, default=3, help='layer num')
    parser.add_argument('-model_dim', type=int, default=256)
    parser.add_argument('-num_heads', type=int, default=8)
    parser.add_argument('-ffn_dim', type=int, default=2048)
    parser.add_argument('-dropout', type=float, default=0.2)

    args = parser.parse_args()
    return args


if __name__ == '__main__':
    args = parse()
    solver = Solver(args)

    if args.train:
        solver.train()
    elif args.test:
        solver.test()
Example no. 6
import os
from solver import Solver

solver = Solver()
solver.loadTask(
    os.path.join(os.path.dirname(__file__), "../templates/01t.json"))
solver.printTask()
Example no. 7
validation_path = '/data/david/fai_attr/transfered_data/val_v2'
# validation_path = '/data/david/fai_attr/transfered_data/partial_test_v2'

batch_size = 8
num_workers = 4
gpus = [6]

results_file_path = Path('./results/results_roadmap.md')
f_out = results_file_path.open('a')
f_out.write('%s :\n' %
            time.strftime("%Y-%m-%d-%H-%M", time.localtime(time.time())))
f_out.write('test path %s :\n' % validation_path)

gpus = [0]  # note: this overrides gpus = [6] assigned above; only [0] takes effect
solver = Solver(validation_path=validation_path)

if len(sys.argv) == 2:
    task = sys.argv[1]
    print("start validating task: %s" % task)
    assert task in task_list, "UNKNOWN TASK"
    details = model_dict[task]

    val_acc, val_map, val_loss = solver.validate(
        None,
        model_path=details['model_path'],
        task=task,
        network=details['network'],
        batch_size=details['batch_size'],
        num_workers=details['num_workers'],
        gpus=gpus)
Example no. 8
def main(args):
    with open("./config.yml", 'r') as stream:
        try:
            config = yaml.safe_load(stream)
        except yaml.YAMLError as exc:
            print(exc)
            raise  # config is required below, so re-raise rather than continue with it undefined

    train_logger = SummaryWriter(log_dir=os.path.join(config['log'], 'train'),
                                 comment='training')

    solver = Solver(config)

    train_trans = image_transform(**config['train_transform'])
    config['train_dataset'].update({
        'transform': train_trans,
        'target_transform': None,
        'categories': config['class_names']
    })

    if config.get('val_dataset') is not None:
        config['val_dataset'].update({
            'transform': train_trans,
            'target_transform': None,
            'categories': config['class_names']
        })

    if args.train_prm:
        config['train_dataset'].update({'train_type': 'prm'})
        dataset = train_dataset(**config['train_dataset'])
        config['data_loaders']['dataset'] = dataset
        data_loader = get_dataloader(**config['data_loaders'])

        if config.get('val_dataset') is not None:
            config['val_dataset'].update({'train_type': 'prm'})
            dataset = train_dataset(**config['val_dataset'])
            config['data_loaders']['dataset'] = dataset
            val_data_loader = get_dataloader(**config['data_loaders'])
        else:
            val_data_loader = None

        solver.train_prm(data_loader, train_logger, val_data_loader)
        print('train prm over')

    if args.train_filling:
        proposals_trans = proposals_transform(**config['train_transform'])
        config['train_dataset'].update({
            'train_type': 'filling',
            'target_transform': proposals_trans,
        })

        dataset = train_dataset(**config['train_dataset'])
        config['data_loaders']['dataset'] = dataset
        data_loader = get_dataloader(**config['data_loaders'])

        solver.train_filling(data_loader, train_logger)
        print('train filling over')

    if args.run_demo:
        test_trans = image_transform(**config['test_transform'])
        config['test_dataset'].update({
            'image_size':
            config['test_transform']['image_size'],
            'transform':
            test_trans
        })
        dataset = test_dataset(**config['test_dataset'])
        config['test_data_loaders']['dataset'] = dataset
        data_loader = get_dataloader(**config['test_data_loaders'])

        solver.inference(data_loader)
        print('predict over')
Example no. 9
from visuals import GameWindow
from board import Board
from solver import Solver
from tkinter import *  # "Tkinter" on Python 2
import random, sys, copy, time

root = Tk()

window = GameWindow(root)

random_res = []
for i in range(50):
    board = Board()
    window.update_view(board.generateState(board.grid))
    solver = Solver(board, window, root)
    solver.startSolver("random")
    random_res.append(int(2**board.bestTile))

print(random_res)
'''for i in range(1,50):
    board = Board()
    window.update_view( board.generateState(board.grid) )
    solver = Solver(board, window, root, train_nr=i)
#modes: random, upleftdownright, partialAI, onestepahead
    solver.startSolver("expectimax")'''

# TO PLAY THE GAME, UNCOMMENT THESE LINES
'''
while True:
    window.update_view( board.generateState(board.grid) )
    print "POINTS========> >  > " + str(board.points) + " <  < <========POINTS"
Example no. 10
train_display        = [train_loader.dataset[i] for i in range(display_size)]
train_display_images = torch.stack([item[0] for item in train_display]).to(device)
test_display         = [test_loader.dataset[i] for i in range(display_size)]
test_display_images  = torch.stack([item[0] for item in test_display]).to(device)

train_display_txt    = torch.stack([item[3] for item in train_display]).to(device)
train_display_txt_lens = torch.stack([item[4] for item in train_display]).to(device)
test_display_txt    = torch.stack([item[3] for item in test_display]).to(device)
test_display_txt_lens = torch.stack([item[4] for item in test_display]).to(device)

pretrained_embed = None
if opts.use_pretrained_embed:
    with open(config['pretrained_embed'], 'rb') as fin:
        pretrained_embed = pickle.load(fin)
# Setup model and data loader
trainer = Solver(config, device, pretrained_embed).to(device)
if config['use_pretrain']:
    trainer.init_network(config['gen_pretrain'], config['dis_pretrain'])

# Setup logger and output folders
model_name = os.path.splitext(os.path.basename(opts.config))[0]

train_writer = tensorboardX.SummaryWriter(os.path.join(opts.output_path + "/logs", model_name))
output_directory = os.path.join(opts.output_path + "/outputs", model_name)
checkpoint_directory, image_directory = prepare_sub_folder(output_directory)
shutil.copy(opts.config, os.path.join(output_directory, 'config.yaml')) # copy config file to output folder

# Start training
iterations = trainer.resume(checkpoint_directory, config) if opts.resume else 0
trainer.copy_nets()
Example no. 11
def test_empty():
    university = University()
    solver = Solver(university)
    res, _, _ = solver.solve()
    assert res
Example no. 12
if __name__ == '__main__':
    """
    The CLI program to run the model.
    usage: python deepme.py <option>
    options: train -> train a new model
             develop -> develop a better model
             test -> run the test on the whole data set
             path/to/mat -> run the prediction on a .mat file

    """

    if len(sys.argv) == 2:
        mode = sys.argv[1]
        if mode == 'train':
            ecg = ECG(verbose=True)
            solver = Solver(ecg=ecg)
            solver.train()
        elif mode == 'test':
            solver = Solver()
            solver.test(sample_every=30)
        elif mode == 'develop':
            ecg = ECG(verbose=True)
            solver = Solver(ecg=ecg, develop=True)
            solver.train()
        else:
            solver = Solver()
            solver.predict(mode)
    else:
        print("usage: python deepme.py <option>")
Example no. 13
    print("     \\/     ")
    print("   ......   ")
    print("     \\/     ")
    prettify(theList[-1])
    print("Steps to solve: " + str(len(theList)))

def printPretty(theList):
    for index, layout in enumerate(theList):
        prettify(layout)
        if index != len(theList) - 1:
            print("     \\/     ")
        else:
            print("")
    print("Steps to solve: " + str(len(theList)))

run = Solver().run
solvable = Solver().solvable

# printPretty(run([
#     [1, 2, 3],
#     [8, 0, 4],
#     [6, 7, 5]
# ]))
# print("")
# printPretty(run([
#     [2, 3, 5],
#     [4, 8, 1],
#     [0, 7, 6]
# ]))

def prepTest():
Example no. 14
    def cluster(self, X_train, y_dec_train, y_train, classes, batch_size, save_to, labeltype, update_interval=None):
        N = X_train.shape[0]
        self.best_args['update_interval'] = update_interval
        self.best_args['y_dec'] = y_dec_train 
        self.best_args['roi_labels'] = y_train
        self.best_args['classes'] = classes
        self.best_args['batch_size'] = batch_size
        
        # selecting batch size
        # [42*t for t in range(42)]  will produce 16 train epochs
        # [0, 42, 84, 126, 168, 210, 252, 294, 336, 378, 420, 462, 504, 546, 588, 630]
        test_iter = mx.io.NDArrayIter({'data': X_train}, 
                                      batch_size=N, shuffle=False,
                                      last_batch_handle='pad')
        args = {k: mx.nd.array(v.asnumpy(), ctx=self.xpu) for k, v in self.args.items()}
        ## embedded point zi 
        z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
        
        # For visualization we use t-SNE (van der Maaten & Hinton, 2008) applied to the embedded points zi. It
        self.perplexity = 15
        self.learning_rate = 125
        # reconstruct wordy labels list(Y)==named_y
        named_y = [classes[kc] for kc in y_dec_train]
        self.best_args['named_y'] = named_y
        
        # To initialize the cluster centers, we pass the data through
        # the initialized DNN to get embedded data points and then
        # perform standard k-means clustering in the feature space Z
        # to obtain k initial centroids {mu j}
        kmeans = KMeans(self.best_args['num_centers'], n_init=20)
        kmeans.fit(z)
        args['dec_mu'][:] = kmeans.cluster_centers_
        
        ### KL DIVERGENCE MINIMIZATION. eq(2)
        # our model is trained by matching the soft assignment to the target distribution. 
        # To this end, we define our objective as a KL divergence loss between 
        # the soft assignments qi (pred) and the auxiliary distribution pi (label)
        solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.1,
                        lr_scheduler=mx.misc.FactorScheduler(20*update_interval, 0.5))
        def ce(label, pred):
            return np.sum(label*np.log(label/(pred+0.000001)))/label.shape[0]
        solver.set_metric(mx.metric.CustomMetric(ce))

        label_buff = np.zeros((X_train.shape[0], self.best_args['num_centers']))
        train_iter = mx.io.NDArrayIter({'data': X_train}, 
                                       {'label': label_buff},
                                       batch_size=self.best_args['batch_size'],
                                       shuffle=False, last_batch_handle='roll_over')
        self.best_args['y_pred'] = np.zeros((X_train.shape[0]))
        self.best_args['acci'] = []
        self.best_args['bestacci'] = []
        self.ploti = 0
        figprogress = plt.figure(figsize=(20, 15))  
        print('Batch_size = %f' % self.best_args['batch_size'])
        print('update_interval = %f' % update_interval)
        self.best_args['plot_interval'] = int(20*update_interval)
        print('plot_interval = %f' % self.best_args['plot_interval'])
        self.maxAcc = 0.0
        
        def refresh(i):  # i counts solver iterations; one full epoch every N/batch_size iterations
            if i%self.best_args['update_interval'] == 0:
                z = list(model.extract_feature(self.feature, args, None, test_iter, N, self.xpu).values())[0]
                                
                p = np.zeros((z.shape[0], self.best_args['num_centers']))
                self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
                # the soft assignments qi (pred)
                y_pred = p.argmax(axis=1)
                #print(np.std(np.bincount(y_dec_train)), np.bincount(y_dec_train))
                print(np.std(np.bincount(y_pred)), np.bincount(y_pred))
                
                #####################
                # Z-space CV RF classfier METRICS
                #####################
                # compare soft assignments with known labels (only B or M)
                print('\n... Updating i = %f' % i)
                allL = np.asarray(y_train)
                dataZspace = np.concatenate((z[allL!='K',:],  np.reshape(y_pred[allL!='K'],(-1, 1))), axis=1) 
                ydatalabels = np.asarray(allL[allL!='K']=='M').astype(int) # malignant is positive class
                
                cv = StratifiedKFold(n_splits=5)
                RFmodel = RandomForestClassifier(n_jobs=2, n_estimators=500, random_state=0, verbose=0)
                # Evaluate a score by cross-validation
                tprs = []; aucs = []
                mean_fpr = np.linspace(0, 1, 100)
                cvi = 0
                for train, test in cv.split(dataZspace, ydatalabels):
                    probas = RFmodel.fit(dataZspace[train], ydatalabels[train]).predict_proba(dataZspace[test])
                    # Compute ROC curve and area the curve
                    fpr, tpr, thresholds = roc_curve(ydatalabels[test], probas[:, 1])
                    # to create an ROC with 100 pts
                    tprs.append(interp(mean_fpr, fpr, tpr))
                    tprs[-1][0] = 0.0
                    roc_auc = auc(fpr, tpr)
                    aucs.append(roc_auc)
                    cvi += 1
               
                mean_tpr = np.mean(tprs, axis=0)
                mean_tpr[-1] = 1.0
                mean_auc = auc(mean_fpr, mean_tpr)
                                    
                # integer=5, to specify the number of folds in a (Stratified)KFold,
                #scores_BorM = cross_val_score(RFmodel, data, datalabels, cv=5)
                # compute Z-space Accuracy
                #Acc = scores_BorM.mean()
                Acc = mean_auc                       
                print "cvRF BorM mean_auc = %f " % Acc
                #print scores_BorM.tolist()
                    
                if i == 0:
                    tsne = TSNE(n_components=2, perplexity=self.perplexity, learning_rate=self.learning_rate,
                                init='pca', random_state=0, verbose=2, method='exact')
                    Z_tsne = tsne.fit_transform(z)        
                    self.best_args['initAcc'] = Acc
                    # plot initial z        
                    figinint = plt.figure()
                    axinint = figinint.add_subplot(1,1,1)
                    plot_embedding_unsuper_NMEdist_intenh(Z_tsne, named_y, axinint, title='kmeans init tsne: Acc={}'.format(Acc), legend=True)
                    figinint.savefig('{}//tsne_init_z{}_mu{}_{}.pdf'.format(save_to,self.best_args['znum'],self.best_args['num_centers'],labeltype), bbox_inches='tight')     
                    plt.close()                  
                    
                # save best args
                self.best_args['acci'].append( Acc )
                if Acc >= self.maxAcc:
                    print('Improving mean_auc = {}'.format(Acc))
                    for key, v in args.items():
                        self.best_args[key] = v
                        
                    self.maxAcc = Acc
                    self.best_args['pbestacci'] = p
                    self.best_args['zbestacci']  = z 
                    self.best_args['bestacci'].append( Acc )
                    self.best_args['dec_mu'][:] = args['dec_mu'].asnumpy()
                
                if i > 0 and i % self.best_args['plot_interval'] == 0 and self.ploti <= 15:
                    # Visualize the progression of the embedded representation in a subsample of data
                    # For visualization we use t-SNE (van der Maaten & Hinton, 2008) applied to the embedded points zi. It
                    tsne = TSNE(n_components=2, perplexity=self.perplexity, learning_rate=self.learning_rate,
                         init='pca', random_state=0, verbose=2, method='exact')
                    Z_tsne = tsne.fit_transform(z)
                    axprogress = figprogress.add_subplot(4,4,1+self.ploti)
                    plot_embedding_unsuper_NMEdist_intenh(Z_tsne, named_y, axprogress, title="Epoch %d z_tsne Acc (%f)" % (i,Acc), legend=False)
                    self.ploti = self.ploti+1
                      
                ## COMPUTING target distributions P
                ## we compute pi by first raising qi to the second power and then normalizing by frequency per cluster:
                weight = 1.0/p.sum(axis=0) # p.sum provides fj
                weight *= self.best_args['num_centers']/weight.sum()
                p = (p**2)*weight
                train_iter.data_list[1][:] = (p.T/p.sum(axis=1)).T
                print(np.sum(y_pred != self.best_args['y_pred']), 0.001*y_pred.shape[0])
                
                # For the purpose of discovering cluster assignments, we stop our procedure when less than tol% of points change cluster assignment between two consecutive iterations.
                # tol% = 0.001
                if i == self.best_args['update_interval']*200:  # hard stop after 200 update intervals
                    self.best_args['y_pred'] = y_pred   
                    self.best_args['acci'].append( Acc )
                    return True 
                    
                self.best_args['y_pred'] = y_pred

        # start solver
        solver.set_iter_start_callback(refresh)
        solver.set_monitor(Monitor(20))
        solver.solve(self.xpu, self.loss, args, self.args_grad, None,
                     train_iter, 0, 1000000000, {}, False)
        self.end_args = args
        self.best_args['end_args'] = args
        
        # finish                
        figprogress = plt.gcf()
        figprogress.savefig('{}/tsne_progress_z{}_mu{}_{}.pdf'.format(save_to,self.best_args['znum'],self.best_args['num_centers'],labeltype), bbox_inches='tight')
        plt.close()    
        
         # plot final z        
        figfinal = plt.figure()
        axfinal = figfinal.add_subplot(1,1,1)
        tsne = TSNE(n_components=2, perplexity=self.perplexity, learning_rate=self.learning_rate,
             init='pca', random_state=0, verbose=2, method='exact')
        Z_tsne = tsne.fit_transform(self.best_args['zbestacci'])      
        plot_embedding_unsuper_NMEdist_intenh(Z_tsne, self.best_args['named_y'], axfinal, title='final tsne: Acc={}'.format(self.best_args['bestacci'][-1]), legend=True)
        figfinal.savefig('{}/tsne_final_z{}_mu{}_{}.pdf'.format(save_to,self.best_args['znum'],self.best_args['num_centers'],labeltype), bbox_inches='tight')
        plt.close()          

        outdict = {'initAcc':self.best_args['initAcc'],
                   'acci': self.best_args['acci'],
                   'bestacci': self.best_args['bestacci'],
                    'pbestacci':self.best_args['pbestacci'],
                    'zbestacci':self.best_args['zbestacci'],
                    'dec_mubestacci':self.best_args['dec_mu'],
                    'y_pred': self.best_args['y_pred'],
                    'named_y': self.best_args['named_y'],
                    'classes':self.best_args['classes'],
                    'num_centers': self.best_args['num_centers'],
                    'znum':self.best_args['znum'],
                    'update_interval':self.best_args['update_interval'],
                    'batch_size':self.best_args['batch_size']}  
                             
        return outdict
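
The target-distribution update inside refresh() above is the standard DEC rule (the eq. 2 referenced in the comments): square the soft assignments, normalize by cluster frequency, then renormalize each row. A standalone sketch of the same computation, assuming q is a (n_samples, n_centers) NumPy array:

import numpy as np

def target_distribution(q):
    # f_j = q.sum(axis=0) is the soft cluster frequency
    weight = q ** 2 / q.sum(axis=0)
    # renormalize rows so each target p_i is a probability distribution
    return (weight.T / weight.sum(axis=1)).T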
Example no. 15
        #print(data['b',mode][4])
    data['f', mode] = torch.utils.data.DataLoader(shuffle=False,
                                                  dataset=data['f', mode],
                                                  batch_size=8)
    data['b', mode] = torch.utils.data.DataLoader(shuffle=False,
                                                  dataset=data['b', mode],
                                                  batch_size=8)
    #data_not_train[mode] = torch.utils.data.DataLoader(shuffle=False, dataset= data_not_train[mode],batch_size= 1)
#print(data_not_train['dev'])
#print(pos_weight)
lr = 5e-6
front_model = TagValueModel(num_tags=8).to(device)
optimizer = optim.AdamW(front_model.parameters(), lr=lr)
scheduler = StepLR(optimizer, 1, gamma=0.9)
solver = Solver(device, tokenizer)
#solver.train(data['f','train'],data['f','dev'],front_model,optimizer,pos_weight=pos_weight['f','train'],part='front_',ver=ver)

end_model = TagValueModel(num_tags=12).to(device)
optimizer = optim.AdamW(end_model.parameters(), lr=lr)
scheduler = StepLR(optimizer, 1, gamma=0.9)
#solver = Solver(device,tokenizer)
#solver.train(data['b','train'],data['b','dev'],end_model,optimizer,pos_weight=pos_weight['b','train'],part='back_',ver=ver)

emb_model_f = TagValueModel(num_tags=8).to(device)
emb_model_b = TagValueModel(num_tags=12).to(device)
emb_model_f.load_state_dict(torch.load(f'ckpt_{ver}/front_tags.ckpt'))
emb_model_b.load_state_dict(torch.load(f'ckpt_{ver}/back_tags.ckpt'))
cls = ""
for mode in ['dev', 'train', 'test']:
    if not os.path.isfile(f'{path}/emb_b_{mode}{cls}.pkl'):
Example no. 16
def main(config):
    # print config.
    state = {k: v for k, v in config._get_kwargs()}
    print(state)

    # if use cuda.
    # os.environ['CUDA_VISIBLE_DEVICES'] = config.gpu_id
    use_cuda = torch.cuda.is_available()

    # Random seed
    if config.manualSeed is None:
        config.manualSeed = random.randint(1, 10000)
    random.seed(config.manualSeed)
    torch.manual_seed(config.manualSeed)
    if use_cuda:
        torch.cuda.manual_seed_all(config.manualSeed)
        torch.backends.cudnn.benchmark = True           # speed up training.

    # data loader
    from dataloader import get_loader
    if config.stage in ['finetune']:
        sample_size = config.finetune_sample_size
        crop_size = config.finetune_crop_size
    elif config.stage in ['keypoint']:
        sample_size = config.keypoint_sample_size
        crop_size = config.keypoint_crop_size
    else:
        raise ValueError('unexpected stage: %s' % config.stage)  # sample_size/crop_size would be undefined below

    # dataloader for pretrain
    train_dataset, val_dataset, train_loader, val_loader = get_loader(
        # train_path = config.train_path_for_pretraining,
        # val_path = config.val_path_for_pretraining,
        data_cfg_path = config.data_cfg_path,
        stage = config.stage,
        train_batch_size = config.train_batch_size,
        val_batch_size = config.val_batch_size,
        sample_size = sample_size,
        crop_size = crop_size,
        workers = config.workers)
    # # dataloader for finetune
    # train_loader_ft, val_loader_ft = get_loader(
    #     train_path = config.train_path_for_finetuning,
    #     val_path = config.val_path_for_finetuning,
    #     stage = config.stage,
    #     train_batch_size = config.train_batch_size,
    #     val_batch_size = config.val_batch_size,
    #     sample_size = sample_size,
    #     crop_size = crop_size,
    #     workers = config.workers)


    # load model
    from delf import Delf_V1
    model = Delf_V1(
        ncls = config.ncls,
        load_from = config.load_from,
        arch = config.arch,
        stage = config.stage,
        target_layer = config.target_layer,
        use_random_gamma_rescale = config.use_random_gamma_rescale)

    # device = torch.device("cuda")
    # model.to(device)
    # model = torch.nn.DataParallel(model).cuda()
    # model.load_weights()
    # import ipdb; ipdb.set_trace()

    # solver
    from solver import Solver
    epochs = config.num_epochs
    solver = Solver(config=config, model=model,
                    max_iters=int(len(train_dataset) / config.train_batch_size * epochs),
                    num_iters_per_epoch=int(len(train_dataset)/config.train_batch_size))

    # train/test for N-epochs. (50%: pretrain with datasetA, 50%: finetune with datasetB)
    for epoch in range(epochs):
        # if epoch < int(epochs * 0.5):
        #     print('[{:.1f}] load pretrain dataset: {}'.format(
        #         float(epoch) / epochs,
        #         config.train_path_for_pretraining))
        #     train_loader = train_loader_pt
        #     val_loader = val_loader_pt
        # else:
        #     print('[{:.1f}] load finetune dataset: {}'.format(
        #         float(epoch) / epochs,
        #         config.train_path_for_finetuning))
        #     train_loader = train_loader_ft
        #     val_loader = val_loader_ft

        solver.train('train', epoch, train_loader, val_loader)
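        # the same entry point is reused with mode 'val' for the validation pass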
        solver.train('val', epoch, train_loader, val_loader)

    print('Congrats! You just finished DeLF training.')
Example no. 17
from solver import Solver
from data_loader import get_loader
from configs import get_config
from pprint import pprint

if __name__ == '__main__':
    config = get_config()
    pprint(vars(config))

    data_loader = get_loader(batch_size=config.batch_size,
                             max_size=config.vocab_size,
                             is_train=True,
                             data_dir=config.data_dir)

    solver = Solver(config, data_loader)
    solver.build(is_train=True)
    solver.train()
Example no. 18
   np.save('mnist_data.npy', X)
   np.save('mnist_labels.npy', y)

X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=0.1)

X_train = X_train.reshape(-1, 1, 28, 28)
X_val = X_val.reshape(-1, 1, 28, 28)
X_test = X_test.reshape(-1, 1, 28, 28)
# N, D = X_train.shape
print(X_train.shape)
data = {'X_train': X_train, 'y_train': y_train,
        'X_val': X_val, 'y_val': y_val,
        'X_test': X_test, 'y_test': y_test
        }

model = ConvNet()
solver = Solver(model, data,
                 update_rule='sgd',
                 optim_config={
                   'learning_rate': 2e-3,
                 },
                 lr_decay=1,
                 num_epochs=1, batch_size=50,
                 print_every=2)

solver.train()

acc = solver.check_accuracy(X=X_test, y=y_test)
print(acc)
Example no. 19
def main(config):
    cudnn.benchmark = True
    if config.model_type not in ['U_Net','R2U_Net','AttU_Net','R2AttU_Net']:
        print('ERROR!! model_type should be selected in U_Net/R2U_Net/AttU_Net/R2AttU_Net')
        print('Your input for model_type was %s'%config.model_type)
        return

    # Create directories if not exist
    if not os.path.exists(config.model_path):
        os.makedirs(config.model_path)
    if not os.path.exists(config.result_path):
        os.makedirs(config.result_path)
    config.result_path = os.path.join(config.result_path,config.model_type)
    if not os.path.exists(config.result_path):
        os.makedirs(config.result_path)
    
    #lr = random.random()*0.0005 + 0.0000005
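    # sample the learning rate and augmentation probability at random around the configured values (one draw per run)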
    lr = random.random()*0.0005 + config.lr
    augmentation_prob= random.random()*0.7
    #epoch = config.num_epochs#random.choice([100,150,200,250])
    decay_ratio = random.random()*0.8
    decay_epoch = int(config.num_epochs*decay_ratio)

    config.augmentation_prob = augmentation_prob
    #config.num_epochs = epoch
    config.lr = lr
    config.num_epochs_decay = decay_epoch

    print(config)
        
    train_loader = get_loader(image_path=config.train_path,
                            image_size=config.image_size,
                            batch_size=config.batch_size,
                            num_workers=config.num_workers,
                            mode='train',
                            augmentation_prob=config.augmentation_prob)
    valid_loader = get_loader(image_path=config.valid_path,
                            image_size=config.image_size,
                            batch_size=config.batch_size,
                            num_workers=config.num_workers,
                            mode='valid',
                            augmentation_prob=0.)
    test_loader = get_loader(image_path=config.test_path,
                            image_size=config.image_size,
                            batch_size=config.batch_size,
                            num_workers=config.num_workers,
                            mode='test',
                            augmentation_prob=0.)

    solver = Solver(config, train_loader, valid_loader, test_loader)

    
    # Train and sample the images
    if config.mode == 'train':
        solver.train(config.pretrained, config.best_score)
    elif config.mode == 'test':
        config.threshold = config.threshold/100.0
        print(config.threshold)
        solver.test(config.testmodel_path, config.Img_savepath, config.Mask_savepath, config.Pre_savepath, config.threshold)
    elif config.mode == 'pre':
        config.threshold = config.threshold/100.0
        print(config.threshold)
        solver.predict(config.testmodel_path, config.Img_savepath, config.Pre_savepath, config.Mask_savepath, config.threshold)
Example no. 20
def main():
    # Reproducibility
    np.random.seed(12345)
    torch.manual_seed(12345)

    # Preparation
    config = get_parameters()

    # Logging configuration
    writer = None
    if config.tensorboard:
        path_tensorboard = f'{config.logging_dir}/{config.experiment_description}'
        if config.debug_mode:  # Clear tensorboard when debugging
            if os.path.exists(path_tensorboard):
                shutil.rmtree(path_tensorboard)
        writer = SummaryWriter(path_tensorboard)

    data_loader_train, data_loader_valid, data_loader_test = get_data(config)

    if config.use_time_freq:
        transforms = get_time_frequency_transform(config)
    else:
        transforms = None

    # =====================================================================
    # Visualize some data
    tmp_audio = None
    tmp_spec = None
    tmp_data, targets, _ = data_loader_train.dataset[
        0]  # audio is [channels, timesteps]

    # Is the data audio or image?
    if len(tmp_data.shape) == 2:
        tmp_audio = tmp_data
    else:
        tmp_spec = tmp_data

    if config.use_time_freq and tmp_audio is not None:
        tmp_spec = transforms(
            tmp_audio)  # spec is [channels, freq_bins, frames]

    if tmp_spec is not None:
        utils.show_spectrogram(tmp_spec, config)

    if writer is not None:
        if tmp_audio is not None:
            # Store 5 secs of audio
            ind = min(tmp_audio.shape[-1], 5 * config.original_fs)
            writer.add_audio('input_audio', tmp_audio[:, 0:ind], None,
                             config.original_fs)

            tmp_audios = []
            fnames = []
            for i in range(4):
                aud, _, fn = data_loader_train.dataset.dataset[i]
                fnames.append(fn)
                tmp_audios.append(aud)
            writer.add_figure(
                'input_waveform',
                utils.show_waveforms_batch(tmp_audios, fnames, config), None)

        # Analyze some spectrograms
        if tmp_spec is not None:
            img_tform = tforms_vision.Compose([
                tforms_vision.ToPILImage(),
                tforms_vision.ToTensor(),
            ])

            writer.add_image('input_spec', img_tform(tmp_spec),
                             None)  # Raw tensor
            writer.add_figure('input_spec_single',
                              utils.show_spectrogram(tmp_spec, config),
                              None)  # Librosa

            if config.use_time_freq:
                tmp_specs = []
                fnames = []
                for i in range(4):
                    aud, _, fn = data_loader_train.dataset.dataset[i]
                    tmp_specs.append(transforms(aud))
                    fnames.append(fn)

                writer.add_figure(
                    'input_spec_batch',
                    utils.show_spectrogram_batch(tmp_specs, fnames, config),
                    None)
                writer.add_figure('input_spec_histogram',
                                  utils.get_spectrogram_histogram(tmp_specs),
                                  None)
                del tmp_specs, fnames, aud, fn, i

    # Class Histograms
    if not config.dataset_skip_class_hist:
        fig_classes = utils.get_class_histograms(
            data_loader_train.dataset,
            data_loader_valid.dataset,
            data_loader_test.dataset,
            one_hot_encoder=utils.OneHot
            if config.dataset == 'MNIST' else None,
            data_limit=200 if config.debug_mode else None)
        if writer is not None:
            writer.add_figure('class_histogram', fig_classes, None)

    # =====================================================================
    # Train and Test
    solver = Solver(data_loader_train, data_loader_valid, data_loader_test,
                    config, writer, transforms)
    solver.train()
    scores, true_class, pred_scores = solver.test()

    # =====================================================================
    # Save results

    np.save(open(os.path.join(config.result_dir, 'true_class.npy'), 'wb'),
            true_class)
    np.save(open(os.path.join(config.result_dir, 'pred_scores.npy'), 'wb'),
            pred_scores)

    utils.compare_predictions(true_class, pred_scores, config.result_dir)

    if writer is not None:
        writer.close()
Example no. 21
    def cluster(self, X, y=None, update_interval=None):
        N = X.shape[0]
        if not update_interval:
            update_interval = N
        batch_size = 256
        test_iter = mx.io.NDArrayIter({'data': X},
                                      batch_size=batch_size,
                                      shuffle=False,
                                      last_batch_handle='pad')
        args = {
            k: mx.nd.array(v.asnumpy(), ctx=self.xpu)
            for k, v in self.args.items()
        }
        z = list(
            model.extract_feature(self.feature, args, None, test_iter, N,
                                  self.xpu).values())[0]
        kmeans = KMeans(self.num_centers, n_init=20)
        kmeans.fit(z)
        args['dec_mu'][:] = kmeans.cluster_centers_
        solver = Solver('sgd', momentum=0.9, wd=0.0, learning_rate=0.01)

        def ce(label, pred):
            return np.sum(label * np.log(label /
                                         (pred + 0.000001))) / label.shape[0]

        solver.set_metric(mx.gluon.metric.CustomMetric(ce))

        label_buff = np.zeros((X.shape[0], self.num_centers))
        train_iter = mx.io.NDArrayIter({'data': X}, {'label': label_buff},
                                       batch_size=batch_size,
                                       shuffle=False,
                                       last_batch_handle='roll_over')
        self.y_pred = np.zeros((X.shape[0]))

        def refresh(i):
            if i % update_interval == 0:
                z = list(
                    model.extract_feature(self.feature, args, None, test_iter,
                                          N, self.xpu).values())[0]
                p = np.zeros((z.shape[0], self.num_centers))
                self.dec_op.forward([z, args['dec_mu'].asnumpy()], [p])
                y_pred = p.argmax(axis=1)
                print(np.std(np.bincount(y_pred)), np.bincount(y_pred))
                if y is not None:
                    print(np.std(np.bincount(y.astype(int))),
                          np.bincount(y.astype(int)))
                    print(cluster_acc(y_pred, y)[0])
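                # DEC target distribution: square soft assignments, normalize by cluster frequency, then row-normalize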
                weight = 1.0 / p.sum(axis=0)
                weight *= self.num_centers / weight.sum()
                p = (p**2) * weight
                train_iter.data_list[1][:] = (p.T / p.sum(axis=1)).T
                print(np.sum(y_pred != self.y_pred), 0.001 * y_pred.shape[0])
                if np.sum(y_pred != self.y_pred) < 0.001 * y_pred.shape[0]:
                    self.y_pred = y_pred
                    return True
                self.y_pred = y_pred

        solver.set_iter_start_callback(refresh)
        solver.set_monitor(Monitor(50))

        solver.solve(self.xpu, self.loss, args, self.args_grad, None,
                     train_iter, 0, 1000000000, {}, False)
        self.end_args = args
        if y is not None:
            return cluster_acc(self.y_pred, y)[0]
        else:
            return -1
Example no. 22
import numpy as np
import datetime
from world import World
from obstacle_creator import ObstacleCreator
from solver import Solver


start = [7, 7]
goal = [4, 4]
size = [10, 10]

obstacle_creator = ObstacleCreator(size)
solver = Solver(size, start, goal, obstacle_creator.map)

while True:
    print('new cycle', datetime.datetime.now())
    solver.solve_distance()
    print('solver end', datetime.datetime.now())
    solver.find_shortest_path()
    print('find path end', datetime.datetime.now())
    world = World(size, obstacle_creator.map, solver.distance, solver.parent, solver.shortest_path)
    world.world_reset()
    print('world reset', datetime.datetime.now())
    world.draw_distance()
    print('draw_distance end', datetime.datetime.now())
    world.draw_shortest_path()
    print('shortest_path', datetime.datetime.now())
    #world.draw_parent()
Example no. 23
def main(arglist):
    """
    Visualise the policy your code produces for the given map file.
    :param arglist: [map_file_name]
    """

    if len(arglist) != 1:
        print(
            "Running this file visualises the path your code produces for the given map file. "
        )
        print("Usage: policy_visualiser.py [map_file_name]")
        return

    input_file = arglist[0]
    game_map = LaserTankMap.process_input_file(input_file)
    solver = Solver(game_map)

    mark = 0

    # do offline computation
    if game_map.method == 'vi':
        if not WINDOWS:
            signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(game_map.time_limit + 1)
        try:
            solver.run_value_iteration()
        except TimeOutException:
            print("/!\\ Ran overtime during run_value_iteration( )")
            sys.exit(mark)
        except:
            traceback.print_exc()
            print("/!\\ Crash occurred during run_value_iteration( )")
            sys.exit(mark)
        if not WINDOWS:
            signal.alarm(0)
    elif game_map.method == 'pi':
        if not WINDOWS:
            signal.signal(signal.SIGALRM, timeout_handler)
            signal.alarm(game_map.time_limit + 1)
        try:
            solver.run_policy_iteration()
        except TimeOutException:
            print("/!\\ Ran overtime during run_policy_iteration( )")
            sys.exit(mark)
        except:
            traceback.print_exc()
            print("/!\\ Crash occurred during run_policy_iteration( )")
            sys.exit(mark)
        if not WINDOWS:
            signal.alarm(0)

    # simulate an episode (using de-randomised transitions) and compare total reward to benchmark
    total_reward = 0
    state = game_map.make_clone()
    state.render()
    seed = hash(input_file)  # use file name as RNG seed
    for i in range(100):
        new_seed = seed + 1
        if not WINDOWS:
            signal.signal(signal.SIGALRM, timeout_handler)
            if game_map.method == 'mcts':
                signal.alarm(game_map.time_limit + 1)
            else:
                signal.alarm(1)
        try:
            if game_map.method == 'mcts':
                action = solver.get_mcts_policy(state)
            else:
                action = solver.get_offline_policy(state)
        except TimeOutException:
            if game_map.method == 'mcts':
                print("/!\\ Ran overtime during get_mcts_policy( )")
            else:
                print("/!\\ Ran overtime during get_offline_policy( )")
            sys.exit(mark)
        except:
            traceback.print_exc()
            if game_map.method == 'mcts':
                print("/!\\ get_mcts_policy( ) caused crash during evaluation")
            else:
                print(
                    "/!\\ get_offline_policy( ) caused crash during evaluation"
                )
            sys.exit(mark)
        if not WINDOWS and not DEBUG_MODE:
            signal.alarm(0)
        r = state.apply_move(action, new_seed)
        state.render()
        total_reward += r
        if r == game_map.goal_reward or r == game_map.game_over_cost:
            break
        seed = new_seed

        time.sleep(0.5)
Example no. 24
if __name__ == '__main__':
    config = get_config(mode='test')

    print('Loading Vocabulary...')
    vocab = Vocab()
    vocab.load(config.word2id_path, config.id2word_path)
    print(f'Vocabulary size: {vocab.vocab_size}')

    config.vocab_size = vocab.vocab_size

    data_loader = get_loader(
        sentences=load_pickle(config.sentences_path),
        conversation_length=load_pickle(config.conversation_length_path),
        sentence_length=load_pickle(config.sentence_length_path),
        vocab=vocab,
        batch_size=config.batch_size)

    if config.model in VariationalModels:
        solver = VariationalSolver(config,
                                   None,
                                   data_loader,
                                   vocab=vocab,
                                   is_train=False)
        solver.build()
        solver.importance_sample()
    else:
        solver = Solver(config, None, data_loader, vocab=vocab, is_train=False)
        solver.build()
        solver.test()
Example no. 25
def runDPLL(nombreArchivo=None):
    s = Solver()
    s.read(nombreArchivo)
    is_sat = "SAT" if s.solve(0) else "UNSAT"
    output = s.output_dimacs()
    return (s.vars, s.number_clauses, is_sat, output)
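
A minimal usage sketch, assuming a DIMACS CNF file at a hypothetical path problem.cnf:

variables, num_clauses, status, output = runDPLL("problem.cnf")
print(status)  # "SAT" or "UNSAT"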
Example no. 26
import sys
import yaml
from solver import Solver

yml_path = sys.argv[1]
with open(yml_path) as f:
    config = yaml.safe_load(f)

solver = Solver(**(config['model_params']))
solver.fit(**(config['fit_params']))
Example no. 27
                        default='/main_folder/results')

    # Step size
    parser.add_argument('--log_step', type=int, default=10)
    parser.add_argument('--sample_step', type=int, default=150)
    parser.add_argument('--model_save_step', type=int, default=400)

    config = parser.parse_args()
    print(config)
    cudnn.benchmark = True

    # Create directories if not exist
    if not os.path.exists(config.log_path):
        os.makedirs(config.log_path)
    if not os.path.exists(config.model_path):
        os.makedirs(config.model_path)
    if not os.path.exists(config.sample_path):
        os.makedirs(config.sample_path)
    if not os.path.exists(config.test_path):
        os.makedirs(config.test_path)
    face_data_loader = return_loader(config.face_crop_size, config.im_size,
                                     config.batch_size, config.mode)
    # Solver
    solver = Solver(face_data_loader, config)

    if config.mode == 'train':
        solver.train()

    elif config.mode == 'test':
        solver.test()
Example no. 28
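# train on a tiny subset (100 examples) as an overfitting sanity check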
num_train = 100
small_data = {
    'X_train': data['X_train'][:num_train],
    'y_train': data['y_train'][:num_train],
    'X_val': data['X_val'],
    'y_val': data['y_val'],
}

model = ThreeLayerConvNet(weight_scale=1e-2)

solver = Solver(model,
                small_data,
                num_epochs=15,
                batch_size=50,
                update_rule='adam',
                optim_config={
                    'learning_rate': 1e-4,
                },
                verbose=True,
                print_every=1)
solver.train()

plt.subplot(2, 1, 1)
plt.plot(solver.loss_history, 'o')
plt.xlabel('iteration')
plt.ylabel('loss')

plt.subplot(2, 1, 2)
plt.plot(solver.train_acc_history, '-o')
plt.plot(solver.val_acc_history, '-o')
plt.legend(['train', 'val'], loc='upper left')
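# plt.show()  # uncomment to display the figure when running outside a notebook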
Example no. 29
from solver import Solver
from database import Database  # assumed project-local module; adjust to the actual source
import matplotlib.pyplot as plt
if __name__ == '__main__':
    config = {
        'num_units': 128,
        'num_neighbours': 4,
        'input_length': 8,
        'output_length': 4,
    }

    optimizer_config = {
        'learning_rate': 1e-3,
    }

    database = Database()
    solver = Solver(**config)

    data_generator = database.get_social_lstm_train_data(num_data=16)

    num_epoch = 10
    num_batches = 20

    def batch_generator():
        for i in range(num_batches):
            yield next(data_generator)

    def epoch_samples_generator():
        for epoch in range(num_epoch):
            yield batch_generator()

    training_history = solver.train(
Example no. 30
import torch
import numpy as np
import torch.nn as nn
import torch.nn.functional as F
from solver import Solver
from resnet import ResNet, BlockType


class ResNet152(ResNet):
    def add_res_layers(self):
        self._make_layers(BlockType.BOTTLE_NECK, 64, 64, 3)
        self._make_layers(BlockType.BOTTLE_NECK, 256, 128, 8)
        self._make_layers(BlockType.BOTTLE_NECK, 512, 256, 36)
        self._make_layers(BlockType.BOTTLE_NECK, 1024, 512, 3)
        self.layers.append(['conv', 2048, 512, 1, 0])


solver = Solver(train_percentage=0.95, train_batch_size=512)
model = ResNet152(solver.num_label)
solver.train_model(model,
                   warmup_epochs=10,
                   num_epoch_to_log=5,
                   learning_rate=1e-3,
                   weight_decay=0.001,
                   epochs=70,
                   checkpoint='checkpoint/resnet152')
solver.test(model)
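# calibrate the trained model ("caribrate" is the original API's spelling)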
new_model = solver.caribrate(model)
solver.test_caribrate(new_model)