Example #1
def train_binary_predictor():
    clf = classifier(vocab_size, embedding_size, hidden_size, value_size)
    optimizer = opt.Adam(clf.parameters(), lr=1e-3)

    num_epoch = 20
    for i in range(num_epoch):
        print('epoch {}/{}'.format(i + 1, num_epoch))
        shuffle_indices = np.random.permutation(np.arange(data_size))
        # shuffle_indices = np.arange(data_size)
        txt_ = ref[shuffle_indices]
        lengths_ = ref_lengths[shuffle_indices]
        tgt_ = binary_representation[shuffle_indices]

        for j in range(iters):
            start = j * batch_size
            end = min(data_size, (j + 1) * batch_size)
            y = clf(torch.LongTensor(txt_[start:end]), torch.LongTensor(lengths_[start:end]))
            # summed binary cross-entropy; targets are cast to float so they can
            # multiply the log-probabilities element-wise
            loss = -torch.sum(torch.mul(torch.log(y), torch.FloatTensor(tgt_[start:end]))) \
                   - torch.sum(torch.mul(torch.log(1 - y), torch.FloatTensor(1 - tgt_[start:end])))
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print(loss)

        torch.save(clf.state_dict(), 'checkpoint/' + str(101+i) + '-parameter.pkl')
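
The loss above is a summed binary cross-entropy written out by hand. As a sanity check (not part of the original snippet), the same value can be obtained from torch.nn.BCELoss with sum reduction on toy probabilities and 0/1 targets:

import torch

y = torch.tensor([[0.9, 0.2], [0.4, 0.7]])   # toy predicted probabilities
t = torch.tensor([[1.0, 0.0], [0.0, 1.0]])   # toy 0/1 targets
manual = -torch.sum(t * torch.log(y)) - torch.sum((1 - t) * torch.log(1 - y))
builtin = torch.nn.BCELoss(reduction='sum')(y, t)
print(manual.item(), builtin.item())         # the two values agree
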
Example #2
def classifierA():
    if request.method == "POST":
        url = request.form.get("url")
        out = classifier(url)
        return render_template("index.html",out=out)

    return render_template("index.html",out="")
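
This excerpt shows the view function without its route registration. A minimal, self-contained Flask wiring sketch (the app object, the stand-in classifier, and the index.html template are assumptions, not part of the original snippet):

from flask import Flask, request, render_template

app = Flask(__name__)

def classifier(url):
    # Stand-in for the real model call used in the example above.
    return "benign"

@app.route("/", methods=["GET", "POST"])
def classifierA():
    if request.method == "POST":
        return render_template("index.html", out=classifier(request.form.get("url")))
    return render_template("index.html", out="")
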
def object_recognition_classifier(clf, data_train, data_test, label_train,
                                  label_test, num_of_features):
    """
    Train a classifier and test it based on provided data

    :param clf: classifier
    :param data_train: Training dataset
    :param data_test: Testing dataset
    :param label_train: Training labels
    :param label_test: Testing labels
    :param num_of_features: Number of features of the robot

    :return: accuracy, prediction
    """

    train_cats_data = data_train.reshape(-1, num_of_features)
    train_cats_label = label_train.reshape(-1, 1).flatten()

    test_cats_data = data_test.reshape(-1, num_of_features)
    test_cats_label = label_test.reshape(-1, 1).flatten()

    y_acc, y_pred = classifier(clf, train_cats_data, test_cats_data,
                               train_cats_label, test_cats_label)

    return y_acc, y_pred
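
The reshape(-1, num_of_features) and reshape(-1, 1).flatten() calls above flatten a [trials, objects, features] block into the 2-D feature matrix and 1-D label vector that scikit-learn-style estimators expect. A tiny self-contained check of that flattening (the shapes are illustrative assumptions):

import numpy as np

data_train = np.arange(2 * 3 * 4).reshape(2, 3, 4)   # [trials, objects, features]
label_train = np.tile(np.arange(3), (2, 1))          # [trials, objects]
print(data_train.reshape(-1, 4).shape)               # (6, 4)
print(label_train.reshape(-1, 1).flatten().shape)    # (6,)
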
Example #4
def main():

    use_cuda = True
    batch_size = 64
    lr = 0.1
    test_batch_size = 128

    epochs = 20
    device = torch.device("cuda" if use_cuda else "cpu")

    train_kwargs = {'batch_size': batch_size}
    test_kwargs = {'batch_size': test_batch_size}
    if use_cuda:
        cuda_kwargs = {'num_workers': 1,
                       'pin_memory': True,
                       'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)

    transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize((32,32))
        ])

    dataset1 = Mnist_m('mnist_m', train=True, transform=transform)
    dataset2 = Mnist_m('mnist_m', train=False, transform=transform)
    datasets = [dataset1, dataset2]
    dataset_test = ConcatDataset(datasets)
    
    test_loader = torch.utils.data.DataLoader(dataset_test, **test_kwargs)

    model = classifier().cuda()#.to(device)
    model.load_state_dict(torch.load('classifier_complete.pt'))

    test(model, device, test_loader)
Example #5
def train(epoch, dataloader):
    losses = []
    f1score = []
    for i in range(epoch):
        y_pred = []
        y_true = []
        for batch_idx, (data, target) in enumerate(dataloader):
            data, target = Variable(data), Variable(target)
            data = data.type(torch.cuda.FloatTensor)
            target = target.type(torch.cuda.FloatTensor)
            optimizer.zero_grad()
            output = classifier(data)
            loss = F.binary_cross_entropy(output, target)
            loss.backward()
            optimizer.step()
            output = output.cpu().detach().numpy()
            y_pred.append(output)
            target = target.cpu().numpy()
            y_true.append(target)
            if batch_idx % 100 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    i, batch_idx * len(data), len(dataloader.dataset),
                    100. * batch_idx / len(dataloader), loss.item()))
        y_pred = get_pred(y_pred)
        f_score = get_fscore(y_true, y_pred)
        losses.append(loss.item())
        f1score.append(f_score)
        print('Train Epoch: {} \tf1_score: {:.6f}'.format(i, f_score))
    return losses, f1score
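
get_pred and get_fscore are not shown in this excerpt; a self-contained sketch of the thresholding and F1 computation they presumably perform (an assumption, not the original helpers):

import numpy as np
from sklearn.metrics import f1_score

def get_pred(prob_batches, threshold=0.5):
    # Concatenate per-batch probability arrays and binarize at the threshold.
    return (np.concatenate(prob_batches, axis=0) >= threshold).astype(int)

def get_fscore(true_batches, y_pred):
    return f1_score(np.concatenate(true_batches, axis=0), y_pred, average='macro')

probs = [np.array([[0.8, 0.1], [0.3, 0.9]])]
truth = [np.array([[1, 0], [0, 1]])]
print(get_fscore(truth, get_pred(probs)))   # 1.0 on this toy batch
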
def classify(image, top_k, k_patches, ckpt_path, imagenet_path):
    wnids, words = tu.load_imagenet_meta(
        os.path.join(imagenet_path, 'data/meta.mat'))

    image_patches = tu.read_k_patches(image, k_patches)

    x = tf.placeholder(tf.float32, [None, 224, 224, 3])

    _, pred = model.classifier(x, dropout=1.0)

    avg_prediction = tf.div(tf.reduce_sum(pred, 0), k_patches)

    scores, indexes = tf.nn.top_k(avg_prediction, k=top_k)

    saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto()) as sess:
        saver.restore(sess, os.path.join(ckpt_path, 'cnn.ckpt'))

        s, i = sess.run([scores, indexes], feed_dict={x: image_patches})
        s, i = np.squeeze(s), np.squeeze(i)

        print('AlexNet saw:')
        for idx in range(top_k):
            print('{} - score: {}'.format(words[i[idx]], s[idx]))
def object_based_5_fold_cross_validation(clf, data_train, data_test, labels, num_of_features):
    """
    Perform object based 5 fold cross validation and return mean accuracy

    :param clf: classifier
    :param data_train: Training dataset
    :param data_test: Testing dataset
    :param labels: True labels
    :param num_of_features: Number of features of the robot

    :return: mean accuracy of 5 fold validation
    """

    tts = train_test_splits(OBJECTS_PER_CATEGORY)

    my_acc = []

    for a_fold in sorted(tts):
        train_cats_index = tts[a_fold]["train"]
        test_cats_index = tts[a_fold]["test"]

        train_cats_data = data_train[:, train_cats_index]
        train_cats_label = labels[:, train_cats_index]
        train_cats_data = train_cats_data.reshape(-1, num_of_features)
        train_cats_label = train_cats_label.reshape(-1, 1).flatten()

        test_cats_data = data_test[:, test_cats_index]
        test_cats_label = labels[:, test_cats_index]
        test_cats_data = test_cats_data.reshape(-1, num_of_features)
        test_cats_label = test_cats_label.reshape(-1, 1).flatten()

        y_acc, y_pred = classifier(clf, train_cats_data, test_cats_data, train_cats_label, test_cats_label)
        my_acc.append(y_acc)

    return np.mean(my_acc)
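
The split above keeps whole objects together across folds, so no object contributes trials to both train and test. A self-contained sketch of the same idea with scikit-learn's KFold over object indices (shapes, sizes, and the SVC choice are illustrative assumptions; the original helper functions are not shown here):

import numpy as np
from sklearn.model_selection import KFold
from sklearn.svm import SVC

N_OBJECTS, TRIALS, FEATURES, N_CATEGORIES = 10, 6, 8, 2
rng = np.random.default_rng(0)
categories = np.arange(N_OBJECTS) % N_CATEGORIES          # category of each object
# Toy features that are separable by category, plus noise.
data = categories[None, :, None] + 0.1 * rng.standard_normal((TRIALS, N_OBJECTS, FEATURES))
labels = np.tile(categories, (TRIALS, 1))

accs = []
for train_obj, test_obj in KFold(n_splits=5, shuffle=True, random_state=0).split(np.arange(N_OBJECTS)):
    X_tr, y_tr = data[:, train_obj].reshape(-1, FEATURES), labels[:, train_obj].reshape(-1)
    X_te, y_te = data[:, test_obj].reshape(-1, FEATURES), labels[:, test_obj].reshape(-1)
    accs.append(SVC().fit(X_tr, y_tr).score(X_te, y_te))
print(np.mean(accs))
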
Example #8
def main():
    in_arg = get_input_args()
    data_dir = in_arg.data_dir
    save_dir = in_arg.save_dir
    arch = in_arg.arch
    learning_rate = in_arg.learning_rate
    hidden_units = in_arg.hidden_units
    epochs = in_arg.epochs
    #processing_unit = in_arg.gpu
    if torch.cuda.is_available() and in_arg.gpu == 'gpu':
        print('GPU will be used')
        processing_unit = 'gpu'
    else:
        print('CPU will be used')
        processing_unit = 'cpu'

    print(in_arg)

    training_dataloaders, validation_dataloaders, testing_dataloaders, class_to_idx = load_datas(
        data_dir)
    pre_model = pretrained_model(arch)
    model = classifier(pre_model, hidden_units)
    after_train_model = train_model(model, training_dataloaders,
                                    validation_dataloaders, learning_rate,
                                    epochs, processing_unit)
    valid_model(after_train_model, testing_dataloaders, processing_unit)

    save_checkpoint(model, save_dir, class_to_idx)
Example #9
def upload_file():
    # Defaults so a GET request can render the template below without NameErrors.
    visible, proba, whether_dup, f1, f2 = False, 0.0, u'', u'', u''
    if request.method == 'POST':
        f1 = request.form['text1']
        f2 = request.form['text2']
        df, result = form_dataset(f1, f2)
        df.both = df.both.apply(mystem_combined)
        from model import obrabotka
        obrabotka(df.both)
        from model import features
        geo_class = features(df, result)
        from model import classifier
        answers, predictions = classifier(geo_class)
        if answers[0] == 1:
            whether_dup = u'являются'  # "are duplicates"
            proba = round(predictions[0][1] * 100, 2)
        else:
            whether_dup = u'не являются'  # "are not duplicates"
            proba = round(predictions[0][0] * 100, 2)
        visible = True
    return render_template('index.html',
                           visible=visible,
                           proba=proba,
                           whether_dup=whether_dup,
                           f1=f1,
                           f2=f2)
Example #10
def findFractureClassifier(params):
    with open(os.path.join(params.median, "ribs.json"), "r") as file:
        ribs = json.load(file)
    dataset = data.ClassifierTestDataset(params.processed, ribs, params)

    # classifier = model.classifier(False, os.path.join(
    # params.model_path, "resnet34-333f7ec4.pth"))
    classifier = model.classifier(False)
    classifier = torch.nn.DataParallel(classifier, device_ids=[0])
    if params.useGPU:
        classifier = classifier.cuda()
    d = torch.load(os.path.join(params.model_path, "classifier.pt"))
    # d1 = torch.load()
    # d.update(d1)
    classifier.load_state_dict(d)

    classifier.eval()
    detected = []
    cnt = 0
    timer = Timer()
    for batch, centers, imgIDs in torch.utils.data.DataLoader(
            dataset, batch_size=params.batchSize, pin_memory=True,
            num_workers=0):
        cnt += 1
        if params.useGPU:
            batch = batch.cuda()
        with torch.no_grad():
            output = classifier(batch)
            output = torch.nn.functional.softmax(output, 1)
        output = output.cpu().numpy()
        centers = centers.numpy()
        imgIDs = imgIDs.numpy()
        for i in range(output.shape[0]):
            out = output[i]
            if out[1] > params.detectThreshold:
                detected.append((centers[i], imgIDs[i], out[1]))
        if cnt % 100 == 0:
            print(f"Batch {cnt} {timer()}")

    with open(params.anno_path, "r") as file:
        anno = json.load(file)
    anno["annotations"] = []
    regionSize = params.detectRegionSize
    for i, d in enumerate(detected):
        center, imgID, score = d
        anno["annotations"].append({
            "bbox": [
                float(center[0]) - regionSize / 2,
                float(center[1]) - regionSize / 2, regionSize, regionSize
            ],
            "id": i,
            "image_id": int(imgID),
            "score": float(score)
        })
    with open(os.path.join(params.median, "detection.json"), "w") as file:
        json.dump(anno, file, indent=4)
Example #11
    def build_model(self):

        input_width = self.config['model']['input_width']
        input_height = self.config['model']['input_height']
        class_num = self.config['model']['class_num']
        backbone = self.config['model']['backbone']

        train_base = self.config['train']['train_base']

        return classifier(class_num, input_width, input_height, backbone,
                          train_base)
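
For context, a hedged example of the config dictionary layout build_model reads from (the keys match the lookups above; the values are illustrative assumptions):

config = {
    'model': {
        'input_width': 224,
        'input_height': 224,
        'class_num': 10,
        'backbone': 'resnet50',
    },
    'train': {
        'train_base': False,
    },
}
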
Example #12
def inference(hypes, image, phase):
    fex = feature_extractorl('vgg16.npy')
    feature = fex.build(image)
    string_length = hypes['arch']['maxstr']
    logits = hypes['arch']['dictlen']

    char_list = [
        tf.expand_dims(
            classifier('chcnt_' + str(i), logits).build(feature, phase), 1)
        for i in range(string_length)
    ]
    return tf.concat(char_list, 1)
Example #13
def classifierA():
    if request.method == "POST":
        Clump = int(request.form.get("Clump"))
        UnifSize = int(request.form.get("UnifSize"))
        UnifShape = int(request.form.get("UnifShape"))
        MargAdh = int(request.form.get("MargAdh"))
        SingEpiSize = int(request.form.get("SingEpiSize"))
        BareNuc = int(request.form.get("BareNuc"))
        BlandChrom = int(request.form.get("BlandChrom"))
        NormNucl = int(request.form.get("NormNucl"))
        Mit = int(request.form.get("Mit"))
        out = classifier(Clump, UnifSize, UnifShape, MargAdh, SingEpiSize,
                         BareNuc, BlandChrom, NormNucl, Mit)
        return render_template("login.html",
                               out="The Class Of the Tumor Is:" + out)

    return render_template("login.html", out="")
Example #14
def test( 
		top_k, 
		k_patches, 
		display_step,
		imagenet_path,
		ckpt_path):
	test_images = sorted(os.listdir(os.path.join(imagenet_path, 'ILSVRC2012_img_val')))
	test_labels = tu.read_test_labels(os.path.join(imagenet_path, 'data/ILSVRC2012_validation_ground_truth.txt'))

	test_examples = len(test_images)
	
	x = tf.placeholder(tf.float32, [None, 224, 224, 3])
	y = tf.placeholder(tf.float32, [None, 1000])

	_, pred = model.classifier(x, 1.0)

	avg_prediction = tf.div(tf.reduce_sum(pred, 0), k_patches)

	top1_correct = tf.equal(tf.argmax(avg_prediction, 0), tf.argmax(y, 1))
	top1_accuracy = tf.reduce_mean(tf.cast(top1_correct, tf.float32))

	topk_correct = tf.nn.in_top_k(tf.stack([avg_prediction]), tf.argmax(y, 1), k=top_k)
	topk_accuracy = tf.reduce_mean(tf.cast(topk_correct, tf.float32))

	saver = tf.train.Saver()

	with tf.Session(config=tf.ConfigProto()) as sess:
		saver.restore(sess, os.path.join(ckpt_path, 'cnn.ckpt'))

		total_top1_accuracy = 0.
		total_topk_accuracy = 0.

		for i in range(test_examples):
			image_patches = tu.read_k_patches(os.path.join(imagenet_path, 'ILSVRC2012_img_val', test_images[i]), k_patches)
			label = test_labels[i]

			top1_a, topk_a = sess.run([top1_accuracy, topk_accuracy], feed_dict={x: image_patches, y: [label]})
			total_top1_accuracy += top1_a
			total_topk_accuracy += topk_a

			if i % display_step == 0:
				print ('Examples done: {:5d}/{} ---- Top-1: {:.4f} -- Top-{}: {:.4f}'.format(i + 1, test_examples, total_top1_accuracy / (i + 1), top_k, total_topk_accuracy / (i + 1)))
		
		print ('---- Final accuracy ----')
		print ('Top-1: {:.4f} -- Top-{}: {:.4f}'.format(total_top1_accuracy / test_examples, top_k, total_topk_accuracy / test_examples))
		print ('Top-1 error rate: {:.4f} -- Top-{} error rate: {:.4f}'.format(1 - (total_top1_accuracy / test_examples), top_k, 1 - (total_topk_accuracy / test_examples)))
def classify(
		image, 
		top_k, 
		k_patches, 
		ckpt_path, 
		imagenet_path):
	"""	Procedure to classify the image given through the command line

		Args:
			image:	path to the image to classify
			top_k: 	integer representing the number of predictions with highest probability
					to retrieve
			k_patches:	number of crops taken from an image and to input to the model
			ckpt_path:	path to model's tensorflow checkpoint
			imagenet_path:	path to ILSRVC12 ImageNet folder containing train images, 
						validation images, annotations and metadata file

	"""
	wnids, words = tu.load_imagenet_meta(os.path.join(imagenet_path, 'data/meta.mat'))

	# taking a few crops from an image
	image_patches = tu.read_k_patches(image, k_patches)

	x = tf.placeholder(tf.float32, [None, 224, 224, 3])

	_, pred = model.classifier(x, dropout=1.0)

	# average the predictions across the crops
	avg_prediction = tf.div(tf.reduce_sum(pred, 0), k_patches)

	# retrieve the top_k highest-scoring predictions
	scores, indexes = tf.nn.top_k(avg_prediction, k=top_k)

	saver = tf.train.Saver()

	with tf.Session(config=tf.ConfigProto()) as sess:
		saver.restore(sess, os.path.join(ckpt_path, 'cnn.ckpt'))

		s, i = sess.run([scores, indexes], feed_dict={x: image_patches})
		s, i = np.squeeze(s), np.squeeze(i)

		print('AlexNet saw:')
		for idx in range(top_k):
			print ('{} - score: {}'.format(words[i[idx]], s[idx]))
Example #16
    def __init__(self, dataset_folder, mode, mean, label, name, quantized,
                 prefix, subset_size):
        self.dataset = dataset_folder
        self.prefix = prefix

        self.ds = dataset(self.dataset, label)
        self.random_pick = False
        self.max_images = subset_size
        self.filenames = self.ds.get_subset(max_size=self.max_images)

        self.mean = mean
        self.mode = mode

        self.model_file = prefix + '/' + name + '/' + quantized + '_model.prototxt'
        self.weights_file = prefix + '/' + name + '/' + quantized + '_weights.caffemodel'
        self.quantized = quantized
        self.net = classifier(self.model_file, self.weights_file, self.mean,
                              self.mode)
        self.inj = injector(self.net, self.quantized)
    def __init__(self):
        super(runModel, self).__init__()
        self.keep_going = False
        self.load_path = "weight/91acc_9category_1024_512_256.pt"  # weights to load when resuming training
        self.batch_size = 30  # mini-batch size
        self.learning_rate = 0.01  # 1e-2  # learning rate
        self.num_epoches = 700  # number of training epochs
        cfg_path = "./cfg/network.cfg"
        net_block = parse_model_cfg(cfg_path)[0]  # [net]
        self.n_classes = net_block["n_classes"]  # number of classes
        # lists kept for visualizing how training statistics evolve
        self.loss_list, self.lr_list, self.acc_list = [], [], []  # loss / learning rate / accuracy history
        self.layer_list = [1024, 512, 256]
        # self.model = MyFC(self.layer_list, 42, self.n_classes)
        self.model = classifier("./cfg/network.cfg")
        self.train_loader = MyDataLoader(batch_size=self.batch_size).train()
        self.test_loader = MyDataLoader(batch_size=self.batch_size).test()
        self.best_acc = 0  # best accuracy so far
        self.writer = SummaryWriter(logdir='./log')  # training log writer
Example #18
        shuffle=True,
        # num_workers=config.cpu_processor,
        drop_last=True)

    test_dataset = custom_dataset.Custom_dataset(word_to_index,
                                                 path_csv="train_data.csv")
    test_data = test_dataset.get_data()
    test_loader = DataLoader(test_data,
                             batch_size=config.batch,
                             shuffle=False,
                             num_workers=config.cpu_processor,
                             drop_last=True)

    # model setup
    device = 'cpu'  #torch.device(config.gpu if torch.cuda.is_available() else 'cpu')
    model = model.classifier(len(word_to_index))
    model.to(device)

    optimizer = torch.optim.Adam(model.parameters(), lr=config.learning_rate)
    loss_function = nn.CrossEntropyLoss()

    # training
    step_list = []
    loss_list = []
    acc_test_list = []
    acc_dev_list = []
    step = 0
    for i in range(config.epoch):
        print("epoch = ", i)
        start = time.time()
        for n, (label, sent) in enumerate(train_loader):
Example #19
        drop_last=True)

    # test_loader = DataLoader(test_data,
    #                         batch_size=c.batch,
    #                         shuffle=False,
    #                         num_workers=c.cpu_processor,
    #                         drop_last=True)

    dev_loader = DataLoader(val_data,
                            batch_size=c.batch,
                            shuffle=False,
                            num_workers=c.cpu_processor,
                            drop_last=True)

    # model setup
    model = model.classifier()
    model.cuda()

    optimizer = torch.optim.Adam(model.parameters(), lr=c.learning_rate)
    loss_function = nn.CrossEntropyLoss()

    # training
    step_list = []
    loss_list = []
    acc_test_list = []
    acc_dev_list = []
    step = 0
    for i in range(c.epoch):
        start = time.time()
        for n, (q, label) in enumerate(train_loader):
            optimizer.zero_grad()  # 초기화
Example #20
init_trl_label = keras.utils.to_categorical(init_trl_label)
init_trunl_data = np.expand_dims(init_trunl_data, axis=4)
init_trunl_label = keras.utils.to_categorical(init_trunl_label)
te_data = np.expand_dims(te_data, axis=4)
te_label = keras.utils.to_categorical(te_label)

init_trl_set = tf.data.Dataset.from_tensor_slices(
    (init_trl_data,
     init_trl_label)).shuffle(len(init_trl_data)).batch(opt.BATCH_SIZE)
te_set = tf.data.Dataset.from_tensor_slices(
    (te_data, te_label)).batch(opt.BATCH_SIZE)
"""
Model.
"""
'''create classifier'''
classifier = classifier()
optim = keras.optimizers.Adam(lr=opt.LR, decay=opt.DECAY)
classifier.build(input_shape=(opt.BATCH_SIZE, opt.WINDOW_SIZE, opt.WINDOW_SIZE,
                              opt.CHANNEL, 1))
classifier.summary()
'''create feature generator & feature discriminator'''
fea_g = generator()
fea_g.build(input_shape=(opt.BATCH_SIZE, opt.DIM_Z))
fea_g.summary()

fea_d = discriminator()
fea_d.build(input_shape=(opt.BATCH_SIZE, 17 * 17 * 64))
fea_d.summary()

d_loss, g_loss = dcgan_loss()
fea_g_optim = keras.optimizers.Adam(learning_rate=opt.GAN_LR, beta_1=0.5)
    def __init__(self, args):
        # Misc
        use_cuda = args.cuda and torch.cuda.is_available()
        self.device = 'cuda' if use_cuda else 'cpu'
        self.name = args.name
        self.max_iter = int(args.max_iter)
        self.print_iter = args.print_iter
        self.global_iter = 0
        self.global_iter_cls = 0
        self.pbar = tqdm(total=self.max_iter)
        self.pbar_cls = tqdm(total=self.max_iter)

        # Data
        self.dset_dir = args.dset_dir
        self.dataset = args.dataset
        self.batch_size = args.batch_size
        self.eval_batch_size = args.eval_batch_size
        self.data_loader = return_data(args, 0)
        self.data_loader_eval = return_data(args, 2)

        # Networks & Optimizers
        self.z_dim = args.z_dim
        self.gamma = args.gamma
        self.beta = args.beta

        self.lr_VAE = args.lr_VAE
        self.beta1_VAE = args.beta1_VAE
        self.beta2_VAE = args.beta2_VAE

        self.lr_D = args.lr_D
        self.beta1_D = args.beta1_D
        self.beta2_D = args.beta2_D
        self.alpha = args.alpha
        self.beta = args.beta
        self.grl = args.grl

        self.lr_cls = args.lr_cls
        self.beta1_cls = args.beta1_D
        self.beta2_cls = args.beta2_D

        if args.dataset == 'dsprites':
            self.VAE = FactorVAE1(self.z_dim).to(self.device)
            self.nc = 1
        else:
            self.VAE = FactorVAE2(self.z_dim).to(self.device)
            self.nc = 3
        self.optim_VAE = optim.Adam(self.VAE.parameters(),
                                    lr=self.lr_VAE,
                                    betas=(self.beta1_VAE, self.beta2_VAE))

        self.pacls = classifier(30, 2).cuda()
        self.revcls = classifier(30, 2).cuda()
        self.tcls = classifier(30, 2).cuda()
        self.trevcls = classifier(30, 2).cuda()

        self.targetcls = classifier(59, 2).cuda()
        self.pa_target = classifier(30, 2).cuda()
        self.target_pa = paclassifier(1, 1).cuda()
        self.pa_pa = classifier(30, 2).cuda()

        self.D = Discriminator(self.z_dim).to(self.device)
        self.optim_D = optim.Adam(self.D.parameters(),
                                  lr=self.lr_D,
                                  betas=(self.beta1_D, self.beta2_D))

        self.optim_pacls = optim.Adam(self.pacls.parameters(), lr=self.lr_D)

        self.optim_revcls = optim.Adam(self.revcls.parameters(), lr=self.lr_D)

        self.optim_tcls = optim.Adam(self.tcls.parameters(), lr=self.lr_D)
        self.optim_trevcls = optim.Adam(self.trevcls.parameters(),
                                        lr=self.lr_D)

        self.optim_cls = optim.Adam(self.targetcls.parameters(),
                                    lr=self.lr_cls)
        self.optim_pa_target = optim.Adam(self.pa_target.parameters(),
                                          lr=self.lr_cls)
        self.optim_target_pa = optim.Adam(self.target_pa.parameters(),
                                          lr=self.lr_cls)
        self.optim_pa_pa = optim.Adam(self.pa_pa.parameters(), lr=self.lr_cls)

        self.nets = [
            self.VAE, self.D, self.pacls, self.targetcls, self.revcls,
            self.pa_target, self.tcls, self.trevcls
        ]

        # Visdom
        self.viz_on = args.viz_on
        self.win_id = dict(D_z='win_D_z',
                           recon='win_recon',
                           kld='win_kld',
                           acc='win_acc')
        self.line_gather = DataGather('iter', 'soft_D_z', 'soft_D_z_pperm',
                                      'recon', 'kld', 'acc')
        self.image_gather = DataGather('true', 'recon')
        if self.viz_on:
            self.viz_port = args.viz_port
            self.viz = visdom.Visdom(port=self.viz_port)
            self.viz_ll_iter = args.viz_ll_iter
            self.viz_la_iter = args.viz_la_iter
            self.viz_ra_iter = args.viz_ra_iter
            self.viz_ta_iter = args.viz_ta_iter
            if not self.viz.win_exists(env=self.name + '/lines',
                                       win=self.win_id['D_z']):
                self.viz_init()

        # Checkpoint
        self.ckpt_dir = os.path.join(args.ckpt_dir, args.name)
        self.ckpt_save_iter = args.ckpt_save_iter
        mkdirs(self.ckpt_dir + "/cls")
        mkdirs(self.ckpt_dir + "/vae")

        if args.ckpt_load:

            self.load_checkpoint(args.ckpt_load)

        # Output(latent traverse GIF)
        self.output_dir = os.path.join(args.output_dir, args.name)
        self.output_save = args.output_save
        mkdirs(self.output_dir)
Example #22
def inference_from_embeddings(embeddings, threshold):
    output = classifier(embeddings)
    predicted_probabilities = F.softmax(output, dim=1).detach().numpy()
    return "HELPFUL" if predicted_probabilities[
        0, 1] >= threshold else "NOT HELPFUL"
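
A self-contained usage sketch with a stand-in linear classifier (the real model and its 768-dimensional input are assumptions; F is torch.nn.functional as in the snippet above):

import torch
import torch.nn.functional as F

classifier = torch.nn.Linear(768, 2)   # stand-in for the trained helpfulness model
embeddings = torch.randn(1, 768)       # one hypothetical review embedding
print(inference_from_embeddings(embeddings, threshold=0.5))
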
Example #23
def main():
    # Training settings

    use_cuda = True
    gamma = 0.7
    save_model = True
    batch_size = 128  #128
    lr = 0.01
    test_batch_size = 256

    epochs = 500
    device = torch.device("cuda" if use_cuda else "cpu")

    train_kwargs = {'batch_size': batch_size}
    test_kwargs = {'batch_size': test_batch_size}
    if use_cuda:
        cuda_kwargs = {'num_workers': 8, 'pin_memory': True, 'shuffle': True}
        train_kwargs.update(cuda_kwargs)
        test_kwargs.update(cuda_kwargs)

    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Resize((32, 32)),
        #transforms.Normalize((0.1307,), (0.3081,))
    ])
    dataset1 = datasets.MNIST('../data',
                              train=True,
                              download=True,
                              transform=transform)
    dataset2 = datasets.MNIST('../data', train=False, transform=transform)
    train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)

    model = classifier().cuda()  #.to(device)
    model2 = Net().cuda()
    model.load_state_dict(torch.load('classifier_basic.pt'))
    optimizer = optim.Adam(model2.parameters(), lr=0.001, betas=(0.9, 0.999))
    #optimizer = optim.SGD(model2.parameters(),lr=0.001,momentum=0.9,nesterov=True)
    #scheduler = StepLR(optimizer, step_size=1,gamma=gamma)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer,
                                                           mode='max',
                                                           factor=0.9,
                                                           patience=3)
    for epoch in range(1, epochs + 1):
        print("Epoch {}".format(epoch))
        model2.train()
        log_interval = 10
        loss_sum = 0.0
        from tqdm import tqdm
        for (data, target) in tqdm(train_loader):
            data, target = data.to(device), target.to(device)

            optimizer.zero_grad()
            output, loss_weight = model2(data)
            #print(output[0][0])
            #print(torch.unique(output[0].detach()))
            cv2.imwrite(
                'demo.png',
                cv2.pyrUp(
                    cv2.pyrUp(
                        output.detach().cpu().numpy()[0].transpose(1, 2, 0) *
                        255)))
            cv2.imwrite(
                'demo_orig.png',
                cv2.pyrUp(
                    cv2.pyrUp(
                        data.detach().cpu().numpy()[0].transpose(1, 2, 0) *
                        255)))
            output_ = (model(output))
            #print(data.size())
            loss_sep = (loss_weight.mean())
            loss_class = -(nn.CrossEntropyLoss()(output_, target)) * 1e+2
            loss = loss_class  #+loss_sep
            loss_class.backward()

            #print("Loss Train",float(np.abs(float(loss))))

            loss_sum += np.abs(
                float(loss) / (len(train_loader.dataset) // data.shape[0]))
            #print("Loss_Sep {} Loss_Classification {}".format((float(loss_sep)),np.abs(float(loss_class))))
            optimizer.step()
            #break
            #if batch_idx % log_interval == 0:
        print('Epoch {} Train loss {}'.format(epoch, loss_sum))

        model2.eval()
        model.eval()
        test_loss = 0
        correct = 0
        with torch.no_grad():
            for data, target in test_loader:
                data, target = data.to(device), target.to(device)
                output, loss_weight = model2(data)
                output = model(output)
                test_loss += np.abs(
                    (nn.CrossEntropyLoss()(output, target).cpu().item()) +
                    (loss_weight.mean().cpu()))  # sum up batch loss
                pred = output.argmax(
                    dim=1,
                    keepdim=True)  # get the index of the max log-probability
                correct += pred.eq(target.view_as(pred)).sum().item()

        test_loss /= len(test_loader.dataset)

        print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.
              format(test_loss, correct, len(test_loader.dataset),
                     100. * correct / len(test_loader.dataset)))
        torch.save(model2.state_dict(), 'classifier_advanced.pt')

        scheduler.step(test_loss)

    if save_model:
        torch.save(model.state_dict(), "mnist_cnn.pt")
Example #24
                                         batch_size=BATCH_SIZE,
                                         pad_mode=pad_mode)
            train_loader = DataLoader(train_dataset)
            valid_dataset = TestDataset(texts_valid,
                                        labels_valid,
                                        dico,
                                        batch_size=BATCH_SIZE,
                                        pad_mode=pad_mode)
            valid_loader = DataLoader(valid_dataset)

            # Instantiate the model
            model = mod.classifier(embedding_dim,
                                   num_hidden_nodes,
                                   num_output_nodes,
                                   num_layers,
                                   init=init,
                                   glove=glove,
                                   embed_weights=embed_weights,
                                   bidirectional=bidirectional,
                                   dropout=dropout)
            model.to(device)

            # Visualizing architecture
            print(model)

            #define optimizer and loss
            optimizer = optim.Adagrad(
                model.parameters(), lr=lr, weight_decay=L2
            )  #Momentum seems not to be in pytorch adagrad ...

            criterion = nn.CrossEntropyLoss()
Example #25
data_transform = transforms.Compose(
    [transforms.Resize((16, 16)),
     transforms.ToTensor()])
s_trainset = datasets.MNIST('tmp',
                            download=True,
                            train=True,
                            transform=data_transform)
s_testset = datasets.MNIST('tmp', train=False, transform=data_transform)
s_trainloader = DataLoader(s_trainset, batch_size=batch_size, shuffle=True)
s_testloader = DataLoader(s_testset, batch_size=batch_size, shuffle=True)
t_trainset, t_testset = load_usps(data_per_class)  # specifying a transform here is not allowed
t_trainloader = DataLoader(t_trainset, batch_size=batch_size, shuffle=True)
t_testloader = DataLoader(t_testset, batch_size=64, shuffle=True)

net_g = Encoder()
net_h = classifier()
net_DCD = DCD()
loss_func = torch.nn.CrossEntropyLoss()  # the loss function is shared

# train g and h on the source domain
print("part 1 : initial training for g and h")
optimizer = torch.optim.Adam(list(net_g.parameters()) +
                             list(net_h.parameters()),
                             lr=0.001)  # a single optimizer updates both networks
net_g = net_g.to(device)
net_h = net_h.to(device)
net_DCD = net_DCD.to(device)
if not device == "cpu":
    net_g = nn.DataParallel(net_g)
    net_h = nn.DataParallel(net_h)
    net_DCD = nn.DataParallel(net_DCD)
#define hyperparameters
size_of_vocab = tk.vocab_len()
embedding_dim = args.embedding_dim
num_hidden_nodes = args.num_hidden_nodes
num_output_nodes = 1
num_layers = args.num_layers
bidirection = True
dropout = args.dropout
if args.num_layers == 1:
    dropout = 0.0

#instantiate the model
model = classifier(size_of_vocab,
                   embedding_dim,
                   num_hidden_nodes,
                   num_output_nodes,
                   num_layers,
                   bidirectional=bidirection,
                   dropout=dropout)

print(model)


def count_parameters(model):
    return sum(p.numel() for p in model.parameters() if p.requires_grad)


print(f'The model has {count_parameters(model):,} trainable parameters')


def aucroc(y_scores, y_true):
Example #27
def main():
    np.random.seed(42)
    phoneHMMs = np.load('../Assignment2/lab2_models_all.npz',
                        allow_pickle=True)['phoneHMMs'].item()
    phones = sorted(phoneHMMs.keys())
    nstates = {phone: phoneHMMs[phone]['means'].shape[0] for phone in phones}
    state_list = [
        ph + '_' + str(idx) for ph in phones for idx in range(nstates[ph])
    ]

    filenames = ['Data/traindata.npz', 'Data/testdata.npz']
    sets = [
        'tidigits/disc_4.1.1/tidigits/train',
        'tidigits/disc_4.2.1/tidigits/test'
    ]
    for idx, file_name in enumerate(filenames):
        if not os.path.isfile(file_name):
            data = []
            for root, dirs, files in os.walk(sets[idx]):
                for file in tqdm(files):
                    if file.endswith('.wav'):
                        filename = os.path.join(root, file)
                        samples, samplingrate = loadAudio(filename)
                        lmfcc, mspec = lab1_proto.mfcc(samples)
                        wordTrans = list(path2info(filename)[2])
                        phoneTrans = words2phones(wordTrans, prondict.prondict)
                        targets = forcedAlignment(lmfcc, phoneHMMs, phoneTrans)
                        data.append({
                            'filename': filename,
                            'lmfcc': lmfcc,
                            'mspec': mspec,
                            'targets': targets
                        })
            if file_name.endswith('traindata.npz'):
                np.savez(PATH + file_name, traindata=data)
            elif file_name.endswith('testdata.npz'):
                np.savez(PATH + file_name, testdata=data)

    traindata = np.load('Data/traindata.npz', allow_pickle=True)['traindata']
    women_tracks = list()
    men_tracks = list()
    for tr in traindata:
        if 'woman' in tr['filename']:
            women_tracks.append(tr)
        elif 'man' in tr['filename']:
            men_tracks.append(tr)
    men_tracks = np.array(men_tracks)
    women_tracks = np.array(women_tracks)

    val_size = int(len(traindata) * 0.1)  # Size of the validation split (10% of the training data)
    men_pos = np.random.choice(len(men_tracks),
                               int(val_size / 2),
                               replace=False)  # Randomly get men samples
    women_pos = np.random.choice(len(women_tracks),
                                 int(val_size / 2),
                                 replace=False)  # Randomly get women samples
    men_val = men_tracks[men_pos]  # Get validation men
    women_val = women_tracks[women_pos]  # Get validation women
    men_tracks = np.delete(men_tracks,
                           men_pos)  # Delete validation men from training set
    women_tracks = np.delete(
        women_tracks, women_pos)  # Delete validation women from training set
    # Get training, validation and testing
    traindata = np.concatenate((men_tracks, women_tracks))
    valdata = np.concatenate((men_val, women_val))
    testdata = np.load('Data/testdata.npz', allow_pickle=True)['testdata']

    dynamic = True
    feature = 'lmfcc'
    if dynamic:
        if not os.path.isfile(PATH + 'dynxtraindata_' + feature + '.npz') or not \
                os.path.isfile(PATH + 'dynytraindata_' + feature + '.npz'):
            x, y = dynamic_features(traindata,
                                    feature_name=feature,
                                    dynamic=dynamic)
            np.savez(PATH + 'dynxtraindata_' + feature + '.npz', traindata=x)
            np.savez(PATH + 'dynytraindata_' + feature + '.npz', traindata=y)
        x_train = np.load(PATH + 'dynxtraindata_' + feature + '.npz',
                          allow_pickle=True)['traindata']
        y_train = np.load(PATH + 'dynytraindata_' + feature + '.npz',
                          allow_pickle=True)['traindata']
        if not os.path.isfile(PATH + 'dynxvaldata_' + feature + '.npz') or not \
                os.path.isfile(PATH + 'dynyvaldata_' + feature + '.npz'):
            x, y = dynamic_features(valdata,
                                    feature_name=feature,
                                    dynamic=dynamic)
            np.savez(PATH + 'dynxvaldata_' + feature + '.npz', valdata=x)
            np.savez(PATH + 'dynyvaldata_' + feature + '.npz', valdata=y)
        x_val = np.load(PATH + 'dynxvaldata_' + feature + '.npz',
                        allow_pickle=True)['valdata']
        y_val = np.load(PATH + 'dynyvaldata_' + feature + '.npz',
                        allow_pickle=True)['valdata']
        if not os.path.isfile(PATH + 'dynxtestdata_' + feature + '.npz') or not \
                os.path.isfile(PATH + 'dynytestdata_' + feature + '.npz'):
            x, y = dynamic_features(testdata,
                                    feature_name=feature,
                                    dynamic=dynamic)
            np.savez(PATH + 'dynxtestdata_' + feature + '.npz', testdata=x)
            np.savez(PATH + 'dynytestdata_' + feature + '.npz', testdata=y)
        x_test = np.load(PATH + 'dynxtestdata_' + feature + '.npz',
                         allow_pickle=True)['testdata']
        y_test = np.load(PATH + 'dynytestdata_' + feature + '.npz',
                         allow_pickle=True)['testdata']
    else:
        if not os.path.isfile(PATH + 'xtraindata_' + feature + '.npz') or not \
                os.path.isfile(PATH + 'ytraindata_' + feature + '.npz'):
            x, y = dynamic_features(traindata,
                                    feature_name=feature,
                                    dynamic=dynamic)
            np.savez(PATH + 'xtraindata_' + feature + '.npz', traindata=x)
            np.savez(PATH + 'ytraindata_' + feature + '.npz', traindata=y)
        x_train = np.load(PATH + 'xtraindata_' + feature + '.npz',
                          allow_pickle=True)['traindata']
        y_train = np.load(PATH + 'ytraindata_' + feature + '.npz',
                          allow_pickle=True)['traindata']
        if not os.path.isfile(PATH + 'xvaldata_' + feature + '.npz') or not \
                os.path.isfile(PATH + 'yvaldata_' + feature + '.npz'):
            x, y = dynamic_features(valdata,
                                    feature_name=feature,
                                    dynamic=dynamic)
            np.savez(PATH + 'xvaldata_' + feature + '.npz', valdata=x)
            np.savez(PATH + 'yvaldata_' + feature + '.npz', valdata=y)
        x_val = np.load(PATH + 'xvaldata_' + feature + '.npz',
                        allow_pickle=True)['valdata']
        y_val = np.load(PATH + 'yvaldata_' + feature + '.npz',
                        allow_pickle=True)['valdata']
        if not os.path.isfile(PATH + 'xtestdata_' + feature + '.npz') or not \
                os.path.isfile(PATH + 'ytestdata_' + feature + '.npz'):
            x, y = dynamic_features(testdata,
                                    feature_name=feature,
                                    dynamic=dynamic)
            np.savez(PATH + 'xtestdata_' + feature + '.npz', testdata=x)
            np.savez(PATH + 'ytestdata_' + feature + '.npz', testdata=y)
        x_test = np.load(PATH + 'xtestdata_' + feature + '.npz',
                         allow_pickle=True)['testdata']
        y_test = np.load(PATH + 'ytestdata_' + feature + '.npz',
                         allow_pickle=True)['testdata']

    # Flatten into matrix
    x_train = flatten_data(x_train)
    x_val = flatten_data(x_val)
    x_test = flatten_data(x_test)
    y_train = flatten_targets(y_train)
    y_val = flatten_targets(y_val)
    y_test = flatten_targets(y_test)
    # Normalize data
    scaler = StandardScaler()
    scaler.fit(x_train)
    x_train = scaler.transform(x_train).astype('float32')
    x_val = scaler.transform(x_val).astype('float32')
    x_test = scaler.transform(x_test).astype('float32')
    output_dim = len(state_list)
    y_train = target_to_index(y_train, state_list)
    y_val = target_to_index(y_val, state_list)
    y_test = target_to_index(y_test, state_list)
    y_train = np_utils.to_categorical(y_train, output_dim)
    y_val = np_utils.to_categorical(y_val, output_dim)
    y_test = np_utils.to_categorical(y_test, output_dim)

    # Model train
    if dynamic:
        if not os.path.isfile('model_' + feature + 'dynamic' + '.h5'):
            classifier = model.classifier(x_train[0].shape, output_dim)
            classifier.compile(loss='categorical_crossentropy',
                               optimizer='sgd',
                               metrics=['accuracy'])
            classifier.fit(x_train,
                           y_train,
                           validation_data=(x_val, y_val),
                           batch_size=256,
                           epochs=100)
            classifier.save('model_' + feature + 'dynamic' + '.h5')
        else:
            classifier = tf.keras.models.load_model('model_' + feature +
                                                    'dynamic' + '.h5')
    else:
        if not os.path.isfile('model_' + feature + '.h5'):
            classifier = model.classifier(x_train[0].shape, output_dim)
            classifier.compile(loss='categorical_crossentropy',
                               optimizer='sgd',
                               metrics=['accuracy'])
            classifier.fit(x_train,
                           y_train,
                           validation_data=(x_val, y_val),
                           batch_size=256,
                           epochs=100)
            classifier.save('model_' + feature + '.h5')
        else:
            classifier = tf.keras.models.load_model('model_' + feature + '.h5')

    group = True
    merge = True
    y_pred = classifier.predict(x_test, batch_size=256)
    if group:
        if merge:
            y_pred = np.argmax(y_pred, axis=1)
            y_test = np.argmax(y_test, axis=1)
            y_pred = np.array([state_list[item] for item in y_pred])
            y_test = np.array([state_list[item] for item in y_test])
            y_pred = group_phonem(y_pred)
            y_test = group_phonem(y_test)
            y_pred = target_to_index(
                y_pred, sorted(set(group_phonem(np.array(state_list)))))
            y_test = target_to_index(
                y_test, sorted(set(group_phonem(np.array(state_list)))))
            y_pred = np_utils.to_categorical(
                y_pred, len(set(group_phonem(np.array(state_list)))))
            y_test = np_utils.to_categorical(
                y_test, len(set(group_phonem(np.array(state_list)))))
            likelihood, y_test = merge_states(y_pred, y_test)
            y_pred = np.argmax(likelihood, axis=1)
            y_true = np.argmax(y_test, axis=1)
            phone_error_rate = compute_per(y_pred, y_true, likelihood)
            print("Phone Error Rate: " + str(phone_error_rate))
        else:
            y_pred = np.argmax(y_pred, axis=1)
            y_true = np.argmax(y_test, axis=1)
            y_pred = np.array([state_list[item] for item in y_pred])
            y_true = np.array([state_list[item] for item in y_true])
            y_pred = group_phonem(y_pred)
            y_true = group_phonem(y_true)
            confusion_mtx = confusion_matrix(y_true, y_pred)
            plot_confusion_matrix(confusion_mtx,
                                  'Phoneme',
                                  classes=sorted(
                                      set(group_phonem(np.array(state_list)))))
            print("Total accuracy: " +
                  str(np.sum(y_true == y_pred) / y_true.shape[0]))
    else:
        if merge:
            likelihood, y_test = merge_states(y_pred, y_test)
            y_pred = np.argmax(likelihood, axis=1)
            y_true = np.argmax(y_test, axis=1)
            phone_error_rate = compute_per(y_pred, y_true, likelihood)
            print("Phone Error Rate: " + str(phone_error_rate))
        else:
            y_pred = np.argmax(y_pred, axis=1)
            y_true = np.argmax(y_test, axis=1)
            confusion_mtx = confusion_matrix(y_true, y_pred)
            plot_confusion_matrix(confusion_mtx, 'State', classes=state_list)
            print("Total accuracy: " +
                  str(np.sum(y_true == y_pred) / y_true.shape[0]))
Example #28
def main():
    args = parse_args()

    torch.manual_seed(args.manual_seed)

    dataroot = join(os.getcwd(), args.data_dir)
    transform = transforms.Compose([
        # transforms.RandomCrop(args.img_size, padding=None, pad_if_needed=True, fill=0, padding_mode='edge'),
        transforms.Resize((args.img_size, args.img_size)),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5)),
    ])
    src_dataset = dataset_public(
        root=dataroot,
        transform=transform,
        train=True,
        domains=args.source,
    )
    src_loader = torch.utils.data.DataLoader(
        dataset=src_dataset,
        batch_size=args.batch_size,
        shuffle=True,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )
    tar_dataset = dataset_public(
        root=dataroot,
        transform=transform,
        train=True,
        domains=args.target,
    )
    tar_loader = torch.utils.data.DataLoader(
        dataset=tar_dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        pin_memory=True,
        drop_last=False,
    )

    # models
    src_CNN = resnet_345(args).cuda()
    tar_CNN = resnet_345(args).cuda()
    cls = classifier().cuda()

    checkpoint_path = join(os.getcwd(), args.model_dir, args.model_name)
    load_checkpoint(checkpoint_path, src_CNN)
    load_checkpoint(checkpoint_path, tar_CNN)
    load_checkpoint(checkpoint_path, cls)

    fixed_pretrained(src_CNN)
    fixed_pretrained(cls)

    # optimizer
    if args.SGD:
        optimizer = optim.SGD(tar_CNN.parameters(), lr=args.lr, momentum=0.9)
    elif args.Adam:
        optimizer = optim.Adam(tar_CNN.parameters(), lr=args.lr)

    min_len = min(len(src_loader), len(tar_loader))
    best_acc = 0
    stop_count = 0
    err_log_path = join(os.getcwd(), 'models',
                        'adda_{}_err.txt'.format(args.target[0]))
    err_log = open(err_log_path, 'w')
    for epoch in range(args.start_epoch, args.max_epochs + 1):
        src_CNN.eval()
        tar_CNN.train()
        cls.eval()

        print('\nEpoch = {}'.format(epoch))
        err_log.write('Epoch = {}, '.format(epoch))

        losses, train_acc = AverageMeter(), AverageMeter()
        train_pbar = tqdm(total=min_len, ncols=100, leave=True)
        for i, (src_data, tar_data) in enumerate(zip(src_loader, tar_loader)):
            src_imgs, _ = src_data
            tar_imgs, tar_labels = tar_data
            # src_imgs, src_labels = src_data

            src_imgs, tar_imgs, tar_labels = (src_imgs.cuda(), tar_imgs.cuda(),
                                              tar_labels.cuda())
            # src_imgs, tar_imgs, tar_labels, src_labels = src_imgs.cuda(), tar_imgs.cuda(), tar_labels.cuda(), src_labels.cuda()

            tar_CNN.zero_grad()
            src_feature = src_CNN(src_imgs)
            tar_feature = tar_CNN(tar_imgs)

            loss = F.mse_loss(src_feature, tar_feature, reduction='mean')
            loss.backward()

            class_output = cls(tar_feature)
            pred = class_output.max(1, keepdim=True)[1]
            correct = pred.eq(tar_labels.view_as(pred)).sum().item()

            optimizer.step()

            losses.update(loss.data.item(), args.batch_size)
            train_acc.update(correct, args.batch_size)
            train_pbar.update()

            train_pbar.set_postfix({
                'loss': '{:.4f}'.format(losses.avg),
                'acc': '{:.4f}'.format(train_acc.acc),
            })
        train_pbar.close()

        if train_acc.acc > best_acc:
            best_acc = train_acc.acc
            stop_count = 0
            checkpoint_path = join(os.getcwd(), 'models',
                                   'adda_{}.pth'.format(args.target[0]))
            save_checkpoint(checkpoint_path, tar_CNN, cls, optimizer)
        else:
            stop_count += 1

        err_log.write('Loss: {:.4f}, Accuracy: {:.4f}\n'.format(
            losses.avg, train_acc.acc))
        err_log.flush()

        if stop_count == args.early_stopping: break

    err_log.write('Best test_acc: {:.4f}\n'.format(best_acc))
    err_log.close()
Example #29
    with tf.variable_scope('GAN'):
        model = WGAN(h, w, c, z_dim=z_dim, gf_dim=64, df_dim=64)
        g_loss, d_loss, X_gen_sb, img_diff_cost_sb, X_fake_test_sb, y1_fake_test_sb = model.loss(X_real_sb=names_records['X'],
                                                                batch_size=batch_size,
                                                                X_ph = X_ph,
                                                                z_ph = z_ph,
                                                                improved_wgan=True,
                                                                weight_regularize=True)

    h_p, w_p = patch_size
    with tf.variable_scope('Classifier'):
        y_train_sb, y_test_sb = classifier(X_patch_ph, X_gen_patch_ph, h_p, w_p)
        cls_train_cost_sb = tg.cost.entropy(y_train_sb, y_patch_ph)
        cls_valid_cost_sb = tg.cost.entropy(y_test_sb, y_patch_ph)

        # sess.run(init_op)

    optimizer = tf.train.AdamOptimizer(cls_learning_rate)
    update_ops = ops.get_collection(ops.GraphKeys.UPDATE_OPS, 'Classifier')
    with ops.control_dependencies(update_ops):
        train_op = optimizer.minimize(cls_train_cost_sb)

Example #30
                        '--checkpoint',
                        type=str,
                        required=True,
                        help="model checkpoint")
    args = parser.parse_args()

    # Dataloader
    test_data = MyDataset('./dataset/metadata/test.csv',
                          'test',
                          transform=test_transform)
    test_loader = DataLoader(test_data,
                             batch_size=32,
                             shuffle=False,
                             num_workers=8)

    clf = classifier()
    clf.load_state_dict(torch.load(args.checkpoint))
    clf = clf.cuda()
    clf.eval()

    f = open('test.csv', 'w')

    preds = []
    for i, (batch_input, target) in enumerate(test_loader):
        batch_input = batch_input.cuda()
        target = target.cuda()
        output, _ = clf(batch_input)

        pred = torch.max(output, 1)[1]
        preds.extend(pred.tolist())