Example #1
    def set_dataset(self, path_dataset, num_examples_train, num_examples_test, N_train, N_test):
        self.gen = Generator(path_dataset, args.path_tsp)
        self.gen.num_examples_train = num_examples_train
        self.gen.num_examples_test = num_examples_test
        self.gen.N_train = N_train
        self.gen.N_test = N_test
        self.gen.load_dataset()
Example #2
def create_gen(path_dataset, num_examples_test, num):
    gen = Generator(path_dataset, args.path_tsp)
    gen.num_examples_train = 1
    gen.num_examples_test = num_examples_test
    gen.N_train = 10
    gen.N_test = num
    gen.load_dataset()
    return gen
Example #3
def setup():
    logger = Logger(args.path_logger)
    logger.write_settings(args)
    config = make_config(args)
    model = base_model.BaseModel(config)
    siamese_gnn = siamese.Siamese(model).to(device)
    gen = Generator()
    gen.set_args(vars(args))
    if not args.real_world_dataset:
        gen.load_dataset()
    return siamese_gnn, logger, gen
Example #4
def make_cv_predictions(model_id, config, dataset_dir,
                        eval_protocol, n_folds=5):

    function = getattr(data_generator, config['preprocess_fn'])
    preprocess_fn = partial(function, **config['preprocess'])
    generator = Generator(path=dataset_dir,
                          IDs=eval_protocol.path.tolist(),
                          preprocessing_fn=preprocess_fn,
                          shuffle=False, batch_size=128,
                          **config['generator'])

    preds = []
    for i in range(n_folds):
        model = load_model(os.path.join(
            MODELS_PATH, 'model_{}_{}.h5'.format(model_id, i)))
        preds.append(model.predict_generator(
            generator, use_multiprocessing=True, workers=6, verbose=0))

    return np.array(preds).mean(axis=0)
Example #5
def run(model_id):
    """Run experiment."""

    config = configs[model_id]
    logger.info('\n\n\ntrain model {}'.format(model_id))

    # prepare data
    if config['preprocess_fn'] is not None:
        function = getattr(data_generator, config['preprocess_fn'])
        preprocess_fn = partial(function, **config['preprocess'])
    else:
        preprocess_fn = None
    generator = Generator(path=PATH_TRAIN,
                          IDs=meta_train.index.tolist(),
                          labels=meta_train[['target']],
                          preprocessing_fn=preprocess_fn,
                          shuffle=False, batch_size=64,
                          **config['generator'])
    X, y = generate_train_data(generator, meta_train)
    logger.info('X shape: {}, y shape: {}'.format(X.shape, y.shape))

    # define model
    model_function = getattr(models, config['model_name'])
    nn_model = partial(model_function,
                       input_shape=(X.shape[1:]),
                       **config['model_params'])
    nn_model().summary(print_fn=logger.info)
    model = KerasModel(nn_model, logger=logger, **config['train'])

    # train and save model
    cross_val = CrossValidation(X=X, y=y, Xtest=X[:100],
                                logger=logger, **config['cv'])
    pred, pred_test, metrics, trained_models = cross_val.run_cv(model)

    for i, model in enumerate(trained_models):
        path = os.path.join(MODELS_PATH, 'model_{}_{}.h5'.format(model_id, i))
        model.save(path)
Example #6
def train(args):
    if os.path.exists(args.out_model_dir):
        shutil.rmtree(args.out_model_dir)
    create_folder(args.out_model_dir)
    num_classes = cfg.num_classes
    # Load training & testing data
    (tr_x, tr_y, tr_na_list) = load_hdf5(args.tr_hdf5_path, verbose=1)
    (te_x, te_y, te_na_list) = load_hdf5(args.te_hdf5_path, verbose=1)
    print("")

    # Scale data
    tr_x = do_scale(tr_x, args.scaler_path, verbose=1)
    te_x = do_scale(te_x, args.scaler_path, verbose=1)
    # Build model
    (_, n_time, n_freq) = tr_x.shape

    #pdb.set_trace()

    input = Input(shape=(n_time, n_freq), name='input_layer')
    input_ = Reshape((n_time, n_freq, 1))(input)
    '''
    block1 = Conv_BN(input_, 8, (3, 3), act="relu")
    block1 = Conv_BN(block1, 32, (3, 3), act="relu")
    block1 = Conv_BN(block1, 64, (3, 3), act="relu")

    block1 = block_a(input_, 8)
    block1 = block_a(block1, 32)
    block1 = block_a(block1, 64)
    '''
    block1 = block_b(input_, 8)
    block1 = block_b(block1, 32)
    block1 = block_b(block1, 64)
    block1 = MaxPooling2D(pool_size=(1, 2))(block1)

    block2 = block_c(block1, 64)
    block2 = MaxPooling2D(pool_size=(1, 2))(block2)

    block3 = block_c(block2, 64)
    block3 = MaxPooling2D(pool_size=(1, 2))(block3)

    block4 = block_c(block3, 64)
    block4 = MaxPooling2D(pool_size=(1, 2))(block4)

    cnnout = Conv_BN(block4, 128, (1, 1), act="relu", bias=True)
    cnnout = MaxPooling2D(pool_size=(1, 2))(cnnout)
    cnnout = Reshape((240, 256))(cnnout)

    rnn = Bidirectional(
        GRU(128,
            activation='relu',
            return_sequences=True,
            kernel_regularizer=regularizers.l2(0.01),
            recurrent_regularizer=regularizers.l2(0.01)))(cnnout)

    out = TimeDistributed(Dense(
        num_classes,
        activation='softmax',
        kernel_regularizer=regularizers.l2(0.0),
    ),
                          name='output_layer')(rnn)

    model = Model(input, out)
    model.summary()

    # Compile model
    adam = optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, decay=0.009)
    sgd = optimizers.SGD(lr=0.01, momentum=0.9, decay=0.0)
    model.compile(loss=focal_loss(alpha=[1, 1, 1, 1], gamma=1),
                  optimizer="adam",
                  metrics=[myacc(threshold=0.5)])

    # Save model callback
    filepath = os.path.join(
        args.out_model_dir,
        "aed-batchsize_50-lr_0.01-{epoch:04d}-{val_Acc:.4f}.hdf5")
    save_model = ModelCheckpoint(filepath=filepath,
                                 monitor='val_Acc',
                                 verbose=0,
                                 save_best_only=False,
                                 save_weights_only=False,
                                 mode='auto',
                                 period=1)

    # Train
    '''
    history=model.fit(  x=tr_x, 
			y=tr_y, 
			batch_size=50, 
			epochs=200, 
			verbose=1,
			shuffle=True,
			class_weight="auto", 
			callbacks=[save_model], 
			validation_data=(te_x,te_y)
		      ) 

    '''
    # Data generator
    gen = Generator(batch_size=50, type='train')
    history = model.fit_generator(
        generator=gen.generate([tr_x], [tr_y]),
        steps_per_epoch=300,  # 300 generator steps make up one 'epoch'
        epochs=100,  # Maximum 'epoch' to train
        verbose=1,
        class_weight="auto",
        callbacks=[save_model],
        validation_data=(te_x, te_y))

    with open('src/log.py', 'w') as f:
        f.write("history=")
        f.write(str(history.history))
Example #7
if model_type == '512':
    img_height, img_width = 512, 512
    prior_box_configs = prior_box_configs_512
variances = [0.1, 0.1, 0.2, 0.2]

prior_boxes = get_prior_boxes(img_width, img_height, prior_box_configs,
                              variances)
#pickle.dump(prior_boxes, open('default_prior_boxes_{}x{}.pkl'.format(img_height, img_width), 'wb'))
bbox_util = BBoxUtility(num_classes, prior_boxes, use_tf=True)

data = pickle.load(open(data_file, 'rb'))
keys = list(data.keys())  # materialize for slicing (dict views are not subscriptable)
num_train = int(round(0.8 * len(keys)))
train_keys, val_keys = keys[:num_train], keys[num_train:]

data_gen = Generator(data, bbox_util, 1, path_prefix, train_keys, val_keys,
                     (img_height, img_width))

model = SSD300((img_height, img_width, 3), num_classes=num_classes)
if model_type == '512':
    model = SSD512((img_height, img_width, 3), num_classes=num_classes)
model.compile(optimizer=Adam(lr=3e-4),
              loss=MultiboxLoss(num_classes, neg_pos_ratio=3.0).compute_loss)
model.summary()

nb_epochs = 120
callbacks = [
    ModelCheckpoint('checkpoints/weights.{epoch:02d}-{val_loss:.2f}.hdf5',
                    save_weights_only=True),
    ReduceLROnPlateau(monitor='val_loss', factor=0.1, patience=5, verbose=1)
]
history = model.fit_generator(data_gen.generate(True),
Example #8
        loss_single, acc_single = test_single(gnn, gen, n_classes, it)
        loss_lst[it] = loss_single
        acc_lst[it] = acc_single
        torch.cuda.empty_cache()
    print('Avg test loss', np.mean(loss_lst))
    print('Avg test acc', np.mean(acc_lst))
    print('Std test acc', np.std(acc_lst))


def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


if __name__ == '__main__':

    gen = Generator()
    gen.N_train = args.N_train
    gen.N_test = args.N_test
    gen.edge_density = args.edge_density
    gen.p_SBM = args.p_SBM
    gen.q_SBM = args.q_SBM
    gen.random_noise = args.random_noise
    gen.noise = args.noise
    gen.noise_model = args.noise_model
    gen.generative_model = args.generative_model
    gen.n_classes = args.n_classes

    torch.backends.cudnn.enabled = False

    if (args.mode == 'test'):
        print('In testing mode')
Example #9
    def __init__(self,
                 batch=64,
                 Class=2,
                 box_num=[5, 4, 4, 4, 4, 4],
                 lr=0.001,
                 load_pretrain=False,
                 model=1,
                 aug=False,
                 lr_updata=False,
                 uda=False):

        if model == 1:
            self.model = Model(Class=Class, box_num=box_num)
            self.m = 1
        elif model == 2:
            self.model = Inception(5)
            self.m = 2

        self.batch = batch
        self.Class = Class
        self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        if torch.cuda.device_count() > 1:
            print('Lets use', torch.cuda.device_count(), 'GPUs!')
            self.model = nn.DataParallel(self.model)

        self.uda = uda
        self.UDA_model = UDA().to(self.device)
        self.UDA_img = self.get_test_img()
        self.model = self.model.to(self.device)
        self.load_pretrain = load_pretrain
        self.optimizer = Adam(self.model.parameters(), lr=lr, weight_decay=0.0005)
        self.optimizer1 = Adam(self.UDA_model.parameters(), lr=lr, weight_decay=0.0005)

        self.lambda1 = lambda epoch: 0.95 ** epoch
        self.lambda2 = lambda epoch: 0.99 ** epoch
        self.scheduler = lr_scheduler.LambdaLR(self.optimizer, lr_lambda=self.lambda1)
        self.scheduler1 = lr_scheduler.LambdaLR(self.optimizer1, lr_lambda=self.lambda2)

        if self.load_pretrain:
            if lr_updata:
                load_model(self.model, m=self.m)
            else:
                load_model(self.model, self.optimizer, self.scheduler, self.m)

            self.train_loss = ['train_loss']
            self.train_acc = ['train_acc']
            self.verify_loss = ['verify_loss']
            self.verify_acc = ['verify_acc']
            self.lr = ['learning rate']

            data = pd.read_csv('./train_data' + str(self.m) + '.csv')
            self.train_loss.extend(data['train_loss'].tolist())
            self.train_acc.extend(data['train_acc'].tolist())
            self.verify_loss.extend(data['verify_loss'].tolist())
            self.verify_acc.extend(data['verify_acc'].tolist())
            self.lr.extend(data['learning rate'].tolist())
        else:
            self.train_loss = ['train_loss']
            self.train_acc = ['train_acc']
            self.verify_loss = ['verify_loss']
            self.verify_acc = ['verify_acc']
            self.lr = ['learning rate']

            for i in self.model.parameters():
                if len(i.shape) >= 2:
                    xavier_normal_(i)

        self.train_generator = Generator(batch=batch, mode='train', aug=aug)
        self.train_generator2 = Generator(batch=batch, mode='val', aug=aug)
        self.test_generator = Generator(batch=batch, mode='val')
Example #10

if __name__ == '__main__':
    logger = Logger(args.path)
    logger.write_settings(args)
    DCN = DivideAndConquerNetwork(input_size,
                                  args.batch_size,
                                  args.num_units_merge,
                                  args.rnn_layers,
                                  args.grad_clip_merge,
                                  args.num_units_split,
                                  args.split_layers,
                                  args.grad_clip_split,
                                  beta=args.beta)
    if args.load_split is not None:
        DCN.load_split(args.load_split)
    if args.load_merge is not None:
        DCN.load_merge(args.load_merge)
    if torch.cuda.is_available():
        DCN.cuda()
    gen = Generator(args.num_examples_train, args.num_examples_test,
                    args.path_dataset, args.batch_size)
    gen.load_dataset()
    DCN.batch_size = args.batch_size
    DCN.merge.batch_size = args.batch_size
    DCN.split.batch_size = args.batch_size
    if args.mode == 'train':
        train(DCN, logger, gen)
    elif args.mode == 'test':
        test(DCN, gen)
Example #11
        plt.plot(iters, self.cost_test, 'b')
        print('COST ORACLE', self.cost_test_oracle[-1])
        plt.plot(iters, self.cost_test_oracle, 'r')
        plt.xlabel('iterations')
        plt.ylabel('Mean cost')
        plt.title('Mean cost Testing with beam_size : {}'.format(beam_size))
        plt.tight_layout(pad=0.8, w_pad=0.5, h_pad=2.0)
        path = os.path.join(self.path_dir, 'testing.png')
        plt.savefig(path)


if __name__ == '__main__':
    path_dataset = '/data/anowak/TSP/'
    path_logger = '/home/anowak/tmp/TSP1/'
    path_tsp = '/home/anowak/QAP_pt/src/tsp/LKH/'
    gen = Generator(path_dataset, path_tsp, mode='CEIL_2D')
    gen.num_examples_train = 200
    gen.num_examples_test = 40
    gen.J = 4
    gen.load_dataset()
    sample = gen.sample_batch(2, cuda=torch.cuda.is_available())
    W = sample[0][0][:, :, :, 1]  # weighted adjacency matrix
    WTSP = sample[1][0]  # hamiltonian cycle adjacency matrix
    pred = sample[1][0]
    perm = sample[1][1]
    optimal_costs = sample[2]
    ########################## test compute accuracy ##########################
    labels = torch.topk(WTSP, 2, dim=2)[1]
    accuracy = utils.compute_accuracy(WTSP, labels)
    print('accuracy', accuracy)
    ########################## test compute_hamcycle ##########################
Example #12
def main():
    args = read_args_commandline()

    logger = Logger(args.path_logger)
    logger.write_settings(args)

    gen = Generator(args)
    torch.backends.cudnn.enabled = False

    if (args.mode == 'test'):
        print('In testing mode')
        # filename = 'gnn_J' + str(args.J) + '_lyr' + str(args.num_layers) + '_Ntr' + str(gen.N_test) + '_it' + str(args.iterations)
        filename = args.filename_existing_gnn
        path_plus_name = os.path.join(args.path_gnn, filename)
        if ((filename != '') and (os.path.exists(path_plus_name))):
            print('Loading gnn ' + filename)
            gnn = torch.load(path_plus_name)
            if torch.cuda.is_available():
                gnn.cuda()
            acc, z, inb, label = test(gnn, logger, gen, args, iters=None)
            res = {'acc': acc, 'nacc': z, 'inb': inb, 'label': label}
            path_plus_name = os.path.join(args.path_output,
                                          args.filename_test_segm)
            print('Saving acc, nacc, inb...' + args.filename_test_segm)
            with open(path_plus_name, 'wb') as f:
                pickle.dump(res, f, pickle.HIGHEST_PROTOCOL)
        else:
            print('No such gnn exists; please create one first')

    elif (args.mode == 'train'):
        print('Creating the gnn ...')
        if args.loss_method == 'policy':
            filename = 'lgnn_' + str(args.problem) + str(
                args.problem0) + '_plc' + str(args.num_ysampling) + '_N' + str(
                    args.num_nodes) + '_p' + str(
                        args.edge_density) + '_J' + str(args.J) + '_lyr' + str(
                            args.num_layers) + '_ftr' + str(
                                args.num_features) + '_Lbd' + str(
                                    args.Lambda) + '_LbdR' + str(
                                        args.LambdaIncRate) + '_lr' + str(
                                            args.lr)
        else:
            filename = 'lgnn_' + str(args.problem) + str(
                args.problem0) + '_N' + str(args.num_nodes) + '_p' + str(
                    args.edge_density) + '_J' + str(args.J) + '_lyr' + str(
                        args.num_layers) + '_ftr' + str(
                            args.num_features) + '_Lbd' + str(
                                args.Lambda) + '_LbdR' + str(
                                    args.LambdaIncRate) + '_lr' + str(args.lr)

        path_plus_name = os.path.join(args.path_gnn, filename)
        gnn = lGNN_multiclass(args.num_features, args.num_layers, args.J + 2,
                              args.num_classes)

        if torch.cuda.is_available():
            gnn.cuda()
        print('Training begins')
        train(gnn, logger, gen, args, iters=None)
        print('Saving gnn ' + filename)
        if torch.cuda.is_available():
            torch.save(gnn.cpu(), path_plus_name)
            gnn.cuda()
        else:
            torch.save(gnn, path_plus_name)
Example #13
model1 = model1.eval()
model2 = model2.eval()
model3 = model3.eval()

true_labels = ['true_label']
pre_labels1 = ['AlexNet pre_label']
pre_labels2 = ['InceptionV4 pre_label']
pre_labels3 = ['YOLOV3 pre_label']
pre_labels4 = ['Sum pre_label']
acc1 = 0.0
acc2 = 0.0
acc3 = 0.0
acc4 = 0.0
with torch.no_grad():
    for i in range(30):
        a = Generator(32, mode='test')
        images, labels = next(a)
        images = images.to('cpu')
        labels = labels.to('cpu')
        logits1 = model1(images)
        logits2 = model2(images)
        logits3 = model3(images)
        logits4 = (logits1 + logits2 + logits3) / 3.
        acc1 += accuracy(logits1, labels)
        acc2 += accuracy(logits2, labels)
        acc3 += accuracy(logits3, labels)
        acc4 += accuracy(logits4, labels)
        for t in labels:
            true_labels.append(lab[int(t)])
        for p1 in logits1.argmax(1):
            pre_labels1.append(lab[int(p1)])
Example #14
"""
Regression problem
conversion between Celcius to Farenheight

:complete:
"""

import tensorflow as tf
import numpy as np
import sklearn.metrics
from data_generator import Generator

n = 100
tempGen = Generator(n)
tempCelc, tempFaren = tempGen.getData()
features = np.array(tempCelc)
labels = np.array(tempFaren)

# check examples
assert features.shape == labels.shape == (n, )
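
# Hedged sanity check (added as an illustration): assuming Generator produces
# exact, noise-free Celsius/Fahrenheit pairs, the labels should satisfy
# F = C * 9/5 + 32.
# assert np.allclose(labels, features * 9.0 / 5.0 + 32.0)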

# parameters to adjust
learning_rate = 1

dense_layer = tf.keras.layers.Dense(units=1,
                                    input_shape=[1],
                                    activation='relu')

# build model, single dense layer
model = tf.keras.Sequential([
    dense_layer,
Example #15
        for i in range(1, word_target.shape[1]):
            y_pred, hidden = model(word_one_hot, hidden, images)
            word_one_hot = word_target[:, i, :]

            loss += loss_function(word_target[:, i, :], y_pred)

    batch_loss = loss / int(word_target.shape[1])
    variables = model.trainable_variables
    gradients = tape.gradient(loss, variables)
    optimizer.apply_gradients(zip(gradients, variables))

    return batch_loss


if __name__ == '__main__':
    generator_training = Generator(folder_image='train', folder_label='train.txt')

    # generator_valid = Generator(folder_image='train',
    #                             folder_label='train.txt')
    # if args['finetune']:
    #     model.load_weights('model.h5')

    print('data train: ', len(generator_training.examples))
    # print(len(generator_valid.examples))

    step_per_epoch_training = len(generator_training.examples) // BATCH_SIZE

    for epoch in range(EPOCHS):
        start = time.time()
        total_loss = 0
Example #16
    clusters = args.clusters
    clip_grad_norm = args.clip_grad_norm
    batch_size = args.batch_size
    num_features = args.num_features
    num_layers = args.num_layers
    sigma2 = args.sigma2
    reg_factor = args.reg_factor
    K = args.K
    k_step = args.k_step
    n_samples = args.n_samples
    normalize = args.normalize
    last = args.last
    baseline = args.baseline

    if args.dataset == 'GM':
        gen = Generator('/data/anowak/dataset/', num_examples_train,
                        num_examples_test, N, clusters, dim)
    elif args.dataset == "CIFAR":
        gen = GeneratorCIFAR('/data/anowak/dataset/', num_examples_train,
                             num_examples_test, N, clusters, dim)
        dim = 27
    gen.load_dataset()
    num_iterations = 100000

    if not baseline:
        gnn = Split_GNN(num_features, num_layers, 5, dim_input=dim)
    else:
        gnn = Split_BaselineGNN(num_features, num_layers, 5, K, dim_input=dim)
    if args.load_file != '':
        gnn = load_model(args.load_file, gnn)
    optimizer = optim.RMSprop(gnn.parameters(), lr=1e-3)
    # optimizer = optim.Adam(gnn.parameters())
Example #17
                   args.noise, args.generative_model, elapsed]
            print(template1.format(*info))
            print(template2.format(*out))
            # test(siamese_gnn, logger, gen)
        if it % logger.args['save_freq'] == 0:
            logger.save_model(siamese_gnn)
            logger.save_results()
    print('Optimization finished.')

if __name__ == '__main__':
    logger = Logger(args.path_logger)
    logger.write_settings(args)
    siamese_gnn = Siamese_GNN(args.num_features, args.num_layers, args.J + 2)
    if torch.cuda.is_available():
        siamese_gnn.cuda()
    gen = Generator(args.path_dataset)
    # generator setup
    gen.num_examples_train = args.num_examples_train
    gen.num_examples_test = args.num_examples_test
    gen.J = args.J
    gen.edge_density = args.edge_density
    gen.random_noise = args.random_noise
    gen.noise = args.noise
    gen.noise_model = args.noise_model
    gen.generative_model = args.generative_model
    # load dataset
    # print(gen.random_noise)
    gen.load_dataset()
    if args.mode == 'train':
        train(siamese_gnn, logger, gen)
    # elif args.mode == 'test':
Example #18
        or not param[2].isdigit() or not param[3].isdigit():
        print("Usage:\n"
              "\tprocess number\n"
              "\torder number\n "
              "\torder per second\n"
              "example: \./data_sender.py 3 200 20")
        exit()
    sender_num = int(param[1])
    order_num = int(param[2])
    ops = int(param[3])
    # requests.post(URL, json={"user":"******"}, headers={'Connection':'close'})
    # sender_num = 1
    # order_num = 20
    # ops = 10
    # s = '{"user_id": 1,"initiator": "EUR","time": 1562416385042,"items": [{"id": "2","number": 1},{"id": "2","number": 1},{"id": "2","number": 1},{"id": "3","number": 3}]}'
    # json_obj = json.loads(s)

    # ret = requests.post(URL, json=json_obj,\
    #                 headers={'Connection':'close'})
    # print(ret)
    # print(ret.content)

    # generate test data files
    filenames = ["test-" + str(i) + ".data" for i in range(sender_num)]
    for i in filenames:
        Generator.generate_to_file(order_num, i)

    s = Sender(filenames, sender_num, ops)
    ret = s.run()
    print(ret)
Example #19
        if not last and it % 100 == 0:
            loss = loss.data.cpu().numpy()[0]
            out = [
                '---', it, loss, logger.accuracy_test_aux[-1],
                logger.cost_test_aux[-1], args.beam_size, elapsed
            ]
            print(template_test1.format(*info_test))
            print(template_test2.format(*out))
    print('TEST COST: {} | TEST ACCURACY {}\n'.format(
        logger.cost_test[-1], logger.accuracy_test[-1]))


if __name__ == '__main__':
    logger = Logger(args.path_logger)
    logger.write_settings(args)
    gen = Generator(args.path_dataset, args.path_tsp)
    # generator setup
    gen.num_examples_train = args.num_examples_train
    gen.num_examples_test = args.num_examples_test
    gen.J = args.J
    gen.N = args.N
    gen.dual = args.dual
    # load dataset
    gen.load_dataset()
    # initialize model
    siamese_gnn = Siamese_GNN(args.num_features,
                              args.num_layers,
                              gen.N,
                              args.J + 2,
                              dim_input=3,
                              dual=args.dual)
Example #20
    clip_grad_norm = 40.0
    batch_size = 500
    num_features = 32
    num_layers = 5
    n_samples = 20
    scales = args.splits + 1
    if N >= 200:
        num_examples_test = 100
        batch_size = 100

    test = args.test

    gen = Generator(args.dataset_path,
                    args.solver_path,
                    num_examples_train,
                    num_examples_test,
                    N,
                    C_min,
                    C_max,
                    test=test)
    gen.load_dataset()
    num_iterations = 100000

    Knap = Split_GNN(num_features, num_layers, 3, dim_input=3)
    if args.load_file_path != '':
        Knap = load_model(args.load_file_path, Knap)
    optimizer = optim.Adamax(Knap.parameters(), lr=1e-3)

    log = Logger()
    log2 = Logger()
    path_train_plot = os.path.join(args.logs_path, 'training.png')
Example #21
    def __init__(self,
                 batch=16,
                 lr=0.01,
                 load_pretrain=False,
                 model=1,
                 aug=False,
                 mixup=False):
        if model == 1:
            '''
            AlexNet+DSC:
            all five convolution layers replaced with depthwise-separable convolutions
            '''
            self.model = Net1()
            self.m = 1
        elif model == 2:
            self.model = Inception_FPN(5)
            self.m = 2
        elif model == 3:
            self.m = 3
            self.model = Classifier()
        self.device = torch.device(
            'cuda' if torch.cuda.is_available() else 'cpu')
        if torch.cuda.device_count() > 1:
            print('Lets use', torch.cuda.device_count(), 'GPUs!')
            self.model = nn.DataParallel(self.model)
        self.model = self.model.to(self.device)
        self.load_pretrain = load_pretrain
        self.optimizer = Adam(self.model.parameters(),
                              lr=lr,
                              weight_decay=0.001)
        self.lambda1 = lambda epoch: 0.95**(epoch - 20)
        self.lambda2 = lambda epoch: epoch / 20
        self.scheduler = lr_scheduler.LambdaLR(self.optimizer,
                                               lr_lambda=self.lambda2)
        if self.load_pretrain == True:
            load_model(self.model, self.optimizer, self.scheduler, self.m)
            self.train_loss = ['train_loss']
            self.train_acc = ['train_acc']
            self.verify_loss = ['verify_loss']
            self.verify_acc = ['verify_acc']
            self.lr = ['learning rate']

            data = pd.read_csv('./train_data' + str(self.m) + '.csv')
            self.train_loss.extend(data['train_loss'].tolist())
            self.train_acc.extend(data['train_acc'].tolist())
            self.verify_loss.extend(data['verify_loss'].tolist())
            self.verify_acc.extend(data['verify_acc'].tolist())
            self.lr.extend(data['learning rate'].tolist())
        else:
            self.train_loss = ['train_loss']
            self.train_acc = ['train_acc']
            self.verify_loss = ['verify_loss']
            self.verify_acc = ['verify_acc']
            self.lr = ['learning rate']

            for i in self.model.parameters():
                if len(i.shape) >= 2:
                    xavier_normal_(i)
            if self.m == 3:
                for i in self.model.named_modules():
                    if isinstance(i[1], CBLR):
                        constant_(i[1].BatchNorm2d.weight, 0)
        self.mixup = mixup
        self.train_generator = Generator(batch=batch, mode='train', aug=aug)
        self.test_generator = Generator(batch=batch, mode='verify')
Example #22
    clusters = args.clusters
    clip_grad_norm = args.clip_grad_norm
    batch_size = args.batch_size
    num_features = args.num_features
    num_layers = args.num_layers
    sigma2 = args.sigma2
    reg_factor = args.reg_factor
    K = args.K
    k_step = args.k_step
    n_samples = args.n_samples
    normalize = args.normalize
    last = args.last
    baseline = args.baseline

    if args.dataset == 'GM':
        gen = Generator('./data/kmeans/', num_examples_train,
                        num_examples_test, N, clusters, dim)
    elif args.dataset == "CIFAR":
        gen = GeneratorCIFAR('./data/kmeans/', num_examples_train,
                             num_examples_test, N, clusters, dim)
        dim = 27
    gen.load_dataset()
    num_iterations = 100000

    if not baseline:
        gnn = Split_GNN(num_features, num_layers, 5, dim_input=dim)
    else:
        gnn = Split_BaselineGNN(num_features, num_layers, 5, K, dim_input=dim)
    if args.load_file != '':
        gnn = load_model(args.load_file, gnn)
    optimizer = optim.RMSprop(gnn.parameters(), lr=1e-3)
    # optimizer = optim.Adam(gnn.parameters())
Example #23
    accuracy = 1 - frob_norm
    accuracy = accuracy.mean(0).squeeze()
    return accuracy.data.cpu().numpy()[0]


def compute_variance(probs):
    N = probs.size(1)
    mean = probs.sum(1) / N
    dif = probs - mean.unsqueeze(1).expand_as(probs)
    variance = ((dif * dif).sum(1).sum(0)) / (N * probs.size(0))
    return variance


if __name__ == '__main__':
    path_dataset = './dataset/broma/'
    gen = Generator(path_dataset, './LKH/')
    N = 20
    gen.num_examples_train = 200
    gen.num_examples_test = 10
    gen.N = N
    gen.load_dataset()

    clip_grad = 40.0
    iterations = 5000
    batch_size = 20
    num_features = 10
    num_layers = 5
    J = 4
    rf = 10.0  # regularization factor

    Split = Split_GNN(batch_size, num_features, num_layers, J + 2, dim_input=3)