コード例 #1
0
def test():
    """Plot net1's learning curves, then visualise 16 test predictions."""
    # Learning curve: per-epoch train/validation loss from the fit history.
    history = net1.train_history_
    train_loss = np.array([entry["train_loss"] for entry in history])
    valid_loss = np.array([entry["valid_loss"] for entry in history])

    for curve, tag in ((train_loss, "train"), (valid_loss, "valid")):
        pyplot.plot(curve, linewidth=3, label=tag)
    pyplot.grid()
    pyplot.legend()
    pyplot.xlabel("epoch")
    pyplot.ylabel("loss")
    pyplot.ylim(1e-3, 1e-2)
    pyplot.yscale("log")
    pyplot.show()

    # Predict on the test split and draw the first 16 samples in a 4x4 grid.
    X, _ = load(isTrain=False)
    y_pred = net1.predict(X)

    fig = pyplot.figure(figsize=(6, 6))
    fig.subplots_adjust(
        left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

    for idx in range(16):
        axis = fig.add_subplot(4, 4, idx + 1, xticks=[], yticks=[])
        plot_sample(X[idx], y_pred[idx], axis)

    pyplot.show()
コード例 #2
0
    for data in face_dataset_test:
        resized_input = data['image'].reshape(-1, 1, 96, 96)
        _tensor = torch.from_numpy(resized_input)

        outputs = run_subsets(_tensor,
                              settings=utils.SETTINGS,
                              load_from_file=True)

        this_output = outputs.numpy()
        numpy_out = this_output[0] * 48 + 48
        numpy_out = numpy_out.clip(0, 96)
        output_list.append(numpy_out)
        if plot_index < 16:
            ax = fig.add_subplot(4, 4, plot_index + 1, xticks=[], yticks=[])
            with torch.no_grad():
                plot_sample(data['image'], (outputs).reshape(30, 1), ax)
            plot_index = plot_index + 1

    #plt.show()
    columns = ()
    for cols in utils.SPECIALIST_SETTINGS:
        columns += cols['columns']
    print columns.index('mouth_left_corner_x')
    #df = DataFrame(numpy_out, columns=columns)

    lookup_table = read_csv(utils.FLOOKUP)
    values = []

    for index, row in lookup_table.iterrows():
        values.append((
            row['RowId'],
コード例 #3
0
    D_real = discriminator(X)
    D_fake = discriminator(G, reuse=True)

    return G, D_real, D_fake

# Define constants
# Training hyper-parameters for the DCGAN (TF1 graph-mode script).
NUM_EPOCHS = 100
BATCH_SIZE = 128
LEARNING_RATE = 0.0002  # Adam learning rate shared by both optimizers below
BETA1 = 0.5             # Adam beta1 shared by both optimizers below
NOISE_DIM = 100         # dimensionality of the generator's latent input Z
SAMPLE_SIZE = 100       # number of raw images saved for visual inspection

# Load mnist data
X_train = utils.load_mnist_data()
# Save a grid of raw samples before preprocessing, for reference.
utils.plot_sample(X_train[:SAMPLE_SIZE], "output/mnist_data.png")
X_train = utils.preprocess_images(X_train)
mini_batches = utils.random_mini_batches(X_train, BATCH_SIZE)

# Create DCGAN
# Placeholders: X takes real image batches (dynamic batch size, HWC dims
# taken from the loaded data); Z takes latent noise vectors.
X = tf.placeholder(tf.float32, shape=(None, X_train.shape[1], X_train.shape[2], X_train.shape[3]))
Z = tf.placeholder(tf.float32, [None, NOISE_DIM])
G, D_real, D_fake = create_gan(X, Z)

# Create training steps
G_loss_func, D_loss_func = utils.create_loss_funcs(D_real, D_fake)
# Select variables by scope so each optimizer updates only its own network.
G_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="Generator")
D_vars = tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, scope="Discriminator")
G_train_step = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE, beta1=BETA1).minimize(G_loss_func, var_list=G_vars)
D_train_step = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE, beta1=BETA1).minimize(D_loss_func, var_list=D_vars)
コード例 #4
0
def train(args):
    """Train a conditional time-series GAN.

    Two objectives are supported via ``args.criterion``:
      * ``'l2'``     -- least-squares GAN loss (MSE against 0/1 labels),
                        one discriminator step per generator step.
      * ``'wgangp'`` -- Wasserstein loss with gradient penalty,
                        five critic steps per generator step.

    Generator weights, sample plots and CSV files are written every
    ``args.save_every`` epochs. Raises NotImplementedError for any other
    criterion value.
    """
    # Device Configuration #
    device = torch.device(
        f'cuda:{args.gpu_num}' if torch.cuda.is_available() else 'cpu')

    # Fix Seed for Reproducibility #
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)

    # Samples, Plots, Weights and CSV Path #
    paths = [
        args.samples_path, args.plots_path, args.weights_path, args.csv_path
    ]
    for path in paths:
        make_dirs(path)

    # Prepare Data #
    data = pd.read_csv(args.data_path)[args.column]

    # Pre-processing #
    scaler_1 = StandardScaler()
    scaler_2 = StandardScaler()
    preprocessed_data = pre_processing(data, scaler_1, scaler_2, args.delta)

    # Sliding windows: scaled series for training, raw series for labels.
    X = moving_windows(preprocessed_data, args.ts_dim)
    label = moving_windows(data.to_numpy(), args.ts_dim)

    # Prepare Networks #
    D = Discriminator(args.ts_dim).to(device)
    G = Generator(args.latent_dim, args.ts_dim,
                  args.conditional_dim).to(device)

    # Loss Function #
    if args.criterion == 'l2':
        criterion = nn.MSELoss()
    elif args.criterion == 'wgangp':
        pass  # WGAN-GP losses are computed inline from critic outputs.
    else:
        raise NotImplementedError

    # Optimizers #
    D_optim = torch.optim.Adam(D.parameters(), lr=args.lr, betas=(0.5, 0.9))
    G_optim = torch.optim.Adam(G.parameters(), lr=args.lr, betas=(0.5, 0.9))

    D_optim_scheduler = get_lr_scheduler(D_optim, args)
    G_optim_scheduler = get_lr_scheduler(G_optim, args)

    # Lists #
    D_losses, G_losses = list(), list()

    # Train #
    print("Training Time Series GAN started with total epoch of {}.".format(
        args.num_epochs))

    # Number of critic (discriminator) updates per generator update.
    n_critics = 1 if args.criterion == 'l2' else 5

    for epoch in range(args.num_epochs):

        #######################
        # Train Discriminator #
        #######################

        for j in range(n_critics):
            # BUG FIX: gradients must be cleared before *every* critic step.
            # Previously zero_grad() ran once per epoch, so the 5 WGAN-GP
            # critic steps accumulated gradients from earlier steps.
            D_optim.zero_grad()

            series, start_dates = get_samples(X, label, args.batch_size)

            # Data Preparation #
            series = series.to(device)
            noise = torch.randn(args.batch_size, 1, args.latent_dim).to(device)

            # Adversarial Loss using Real Image #
            prob_real = D(series.float())

            if args.criterion == 'l2':
                real_labels = torch.ones(prob_real.size()).to(device)
                D_real_loss = criterion(prob_real, real_labels)

            elif args.criterion == 'wgangp':
                D_real_loss = -torch.mean(prob_real)

            # Adversarial Loss using Fake Image #
            # The first conditional_dim values of the real window condition
            # the generated tail.
            fake_series = G(noise)
            fake_series = torch.cat(
                (series[:, :, :args.conditional_dim].float(),
                 fake_series.float()),
                dim=2)

            # Detach so the critic update does not back-propagate into G.
            prob_fake = D(fake_series.detach())

            if args.criterion == 'l2':
                fake_labels = torch.zeros(prob_fake.size()).to(device)
                D_fake_loss = criterion(prob_fake, fake_labels)

            elif args.criterion == 'wgangp':
                D_fake_loss = torch.mean(prob_fake)
                # Gradient penalty, already scaled by lambda_gp here.
                D_gp_loss = args.lambda_gp * get_gradient_penalty(
                    D, series.float(), fake_series.float(), device)

            # Calculate Total Discriminator Loss #
            D_loss = D_fake_loss + D_real_loss

            if args.criterion == 'wgangp':
                # BUG FIX: D_gp_loss already includes the lambda_gp factor
                # above; the old code multiplied by lambda_gp a second time,
                # applying lambda_gp**2 to the penalty.
                D_loss += D_gp_loss

            # Back Propagation and Update #
            D_loss.backward()
            D_optim.step()

        ###################
        # Train Generator #
        ###################

        # Clear G's gradients before its update; `noise` and `series`
        # deliberately reuse the last critic batch.
        G_optim.zero_grad()

        # Adversarial Loss #
        fake_series = G(noise)
        fake_series = torch.cat(
            (series[:, :, :args.conditional_dim].float(), fake_series.float()),
            dim=2)
        prob_fake = D(fake_series)

        # Calculate Total Generator Loss #
        if args.criterion == 'l2':
            real_labels = torch.ones(prob_fake.size()).to(device)
            G_loss = criterion(prob_fake, real_labels)

        elif args.criterion == 'wgangp':
            G_loss = -torch.mean(prob_fake)

        # Back Propagation and Update #
        G_loss.backward()
        G_optim.step()

        # Add items to Lists #
        D_losses.append(D_loss.item())
        G_losses.append(G_loss.item())

        ####################
        # Print Statistics #
        ####################

        # Reported losses are running averages over all epochs so far.
        print("Epochs [{}/{}] | D Loss {:.4f} | G Loss {:.4f}".format(
            epoch + 1, args.num_epochs, np.average(D_losses),
            np.average(G_losses)))

        # Adjust Learning Rate #
        D_optim_scheduler.step()
        G_optim_scheduler.step()

        # Save Model Weights and Series #
        if (epoch + 1) % args.save_every == 0:
            torch.save(
                G.state_dict(),
                os.path.join(
                    args.weights_path,
                    'TimeSeries_Generator_using{}_Epoch_{}.pkl'.format(
                        args.criterion.upper(), epoch + 1)))

            series, fake_series = generate_fake_samples(
                X, label, G, scaler_1, scaler_2, args, device)
            plot_sample(series, fake_series, epoch, args)
            make_csv(series, fake_series, epoch, args)

    print("Training finished.")
コード例 #5
0
def generate_timeseries(args):
    """Run the trained generator across the whole series; plot and dump CSV.

    Loads the generator checkpoint saved at the final training epoch,
    steps through the data one non-overlapping window at a time, and
    collects real vs. generated (post-processed) values.
    """
    # Pick GPU when available, otherwise fall back to CPU.
    device = torch.device(
        f'cuda:{args.gpu_num}' if torch.cuda.is_available() else 'cpu')

    # Make sure the inference output directory exists.
    make_dirs(args.inference_path)

    # Only the skip-connection generator is implemented.
    if args.model != 'skip':
        raise NotImplementedError

    G = SkipGenerator(args.latent_dim, args.ts_dim,
                      args.conditional_dim).to(device)
    weight_file = 'TimeSeries_Generator_using{}_Epoch_{}.pkl'.format(
        args.criterion.upper(), args.num_epochs)
    G.load_state_dict(
        torch.load(os.path.join(args.weights_path, weight_file)))

    # Load the raw series and build the same windows used during training.
    data = pd.read_csv(args.data_path)[args.column]

    scaler_1 = StandardScaler()
    scaler_2 = StandardScaler()
    preprocessed_data = pre_processing(data, scaler_1, scaler_2, args.delta)

    X = moving_windows(preprocessed_data, args.ts_dim)
    label = moving_windows(data.to_numpy(), args.ts_dim)

    real, fake = [], []

    # Inference: one window per ts_dim stride, stopping before overrun.
    for start in range(0, data.shape[0], args.ts_dim):
        if start + args.ts_dim > len(data) - 1:
            break

        # Shape the window as (1, 1, ts_dim) for the networks.
        window = X[start, :][np.newaxis, np.newaxis, :]
        window = torch.from_numpy(window).to(device)
        window_start = label[start, 0]

        z = torch.randn(args.val_batch_size, 1, args.latent_dim).to(device)

        with torch.no_grad():
            generated = G(z)
        # Prefix the generated tail with the real conditional segment.
        generated = torch.cat(
            (window[:, :, :args.conditional_dim].float(), generated.float()),
            dim=2)

        window_np = np.squeeze(window.cpu().data.numpy())
        generated_np = np.squeeze(generated.cpu().data.numpy())

        # Undo the scaling/differencing so values are comparable to the raw data.
        window_np = post_processing(window_np, window_start, scaler_1,
                                    scaler_2, args.delta)
        generated_np = post_processing(generated_np, window_start, scaler_1,
                                       scaler_2, args.delta)

        real += window_np.tolist()
        fake += generated_np.tolist()

    plot_sample(real, fake, args.num_epochs - 1, args)
    make_csv(real, fake, args.num_epochs - 1, args)
コード例 #6
0
# Build and compile a 512x512 single-channel U-Net with Dice loss/metric.
im_height = 512
im_width = 512
input_img = Input((im_height, im_width, 1), name='img')
model = unet(input_img, n_filters=32, dropout=0.5, batchnorm=True)
model.compile(optimizer=Adam(), loss=dice_coef_loss, metrics=[dice_coef])

# Reduce the learning rate on plateau; checkpoint only the best weights.
# NOTE(review): checkpoint is named 'UNet256.h5' although inputs are
# 512x512 -- possibly a stale filename; confirm before reusing it.
callbacks = [
    ReduceLROnPlateau(factor=0.1, patience=5, min_lr=0.0001, verbose=1),
    ModelCheckpoint('UNet256.h5', verbose=1, save_best_only=True, save_weights_only=True)
]

results = model.fit(X_train, y_train, batch_size=32, epochs=100, callbacks=callbacks, validation_data=(X_valid, y_valid))

# Model evaluations
# load the best model
model.load_weights('UNet256.h5')
# Evaluate on train set
model.evaluate(X_train, y_train, verbose=1)
# Evaluate on validation set
model.evaluate(X_valid, y_valid, verbose=1)
# Predict on train, val
preds_train = model.predict(X_train, verbose=1)
preds_val = model.predict(X_valid, verbose=1)
# Threshold predictions: binarize predicted masks at 0.5.
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_val_t = (preds_val > 0.5).astype(np.uint8)
# Randomly visualize the predictions & corresponding dice coefficient
plot_sample(X_train, y_train, preds_train, preds_train_t)
plot_sample(X_valid, y_valid, preds_val, preds_val_t)
コード例 #7
0
from model import resolve_single
from model.edsr import edsr

from utils import load_image, plot_sample

# Build a 4x EDSR super-resolution model and load pre-trained weights.
model = edsr(scale=4, num_res_blocks=16)
model.load_weights('weights/edsr-16-x4/weights.h5')

# NOTE(review): the path carries no image extension -- confirm load_image
# resolves 'demo/frame-1214_frame_blend' to an actual file.
lr = load_image('demo/frame-1214_frame_blend')
sr = resolve_single(model, lr)

# Show the low-resolution input next to the super-resolved output.
plot_sample(lr, sr)
コード例 #8
0
ファイル: ava.py プロジェクト: zhaoyin214/nima_pytorch
    # ava_dataset.labels.to_csv(AVA_LABEL_CLEANED_FILEPATH)

    # # training, validation and test sets
    # labels_train, labels_val, labels_test = train_val_test_split(
    #     dataset=ava_dataset.labels, val_ratio=VAL_RATIO, test_ratio=TEST_RATIO
    # )
    # labels_train.to_csv(AVA_TRAIN_LABEL_FILEPATH)
    # labels_val.to_csv(AVA_VAL_LABEL_FILEPATH)
    # labels_test.to_csv(AVA_TEST_LABEL_FILEPATH)

    ava_dataset.labels = AVA_LABEL_CLEANED_FILEPATH
    ava_dataset.image_root = AVA_IMAGE_ROOT

    print("-" * 20)
    print("AVA dataset labels:")
    print("-" * 20)
    print(ava_dataset.labels.sample(10))
    print(ava_dataset.labels.describe())

    # samples
    for idx in range(len(ava_dataset)):

        print(idx)
        sample = ava_dataset[idx]
        plot_sample(sample=sample)

        if idx == 3:
            break

#%%
コード例 #9
0
ファイル: train.py プロジェクト: yuanluw/DCGAN
def run(arg):
    """Train a DCGAN on MNIST or CIFAR, checkpointing every other epoch.

    ``arg`` must provide: lr, epochs, decay, gamma, dataset ('mnist' or
    'cifar'), batch_size, net ('DCGAN'), mul_gpu, checkpoint (tag or None).
    Raises ValueError for an unknown dataset or network name.
    """
    torch.manual_seed(7)
    np.random.seed(7)
    print("lr %f, epoch_num %d, decay_rate %f gamma %f" % (arg.lr, arg.epochs, arg.decay, arg.gamma))

    print("====>Loading data")
    if arg.dataset == 'mnist':
        train_data = get_mnist_dataset("train", arg.batch_size)
        G_output_dim = config.mnist_G_output_dim
        D_input_dim = config.mnist_D_input_dim
    elif arg.dataset == 'cifar':
        train_data = get_cifar_dataset("train", arg.batch_size)
        G_output_dim = config.cifar_G_output_dim
        D_input_dim = config.cifar_D_input_dim
    else:
        # BUG FIX: fail fast instead of hitting a NameError on train_data below.
        raise ValueError("unknown dataset: %s" % arg.dataset)

    print("====>Building model")
    if arg.net == "DCGAN":
        g_net = DC_Generator(config.G_input_dim, config.num_filters, G_output_dim, verbose=False)
        d_net = DC_Discriminator(D_input_dim, config.num_filters[::-1], config.D_output_dim, verbose=False)

        g_net = g_net.to(config.device)
        d_net = d_net.to(config.device)
    else:
        # BUG FIX: fail fast instead of hitting a NameError on g_net below.
        raise ValueError("unknown net: %s" % arg.net)

    g_optimizer = optim.Adam(g_net.parameters(), lr=arg.lr, betas=(0.5, 0.9))
    d_optimizer = optim.Adam(d_net.parameters(), lr=arg.lr, betas=(0.5, 0.9))

    if arg.mul_gpu:
        g_net = nn.DataParallel(g_net)
        d_net = nn.DataParallel(d_net)

    if arg.checkpoint is not None:
        print("load pre train model")
        # NOTE(review): weights are loaded after the optional DataParallel
        # wrap -- confirm checkpoints were saved with matching key prefixes.
        g_pretrained_dict = torch.load(os.path.join(config.checkpoint_path, arg.dataset + "_" +
                                                      arg.net + "_g_net_" + arg.checkpoint + '.pth'))
        d_pretrained_dict = torch.load(os.path.join(config.checkpoint_path, arg.dataset + "_" +
                                                    arg.net + "_d_net_" + arg.checkpoint + '.pth'))
        g_net = load_weight(g_pretrained_dict, g_net)
        d_net = load_weight(d_pretrained_dict, d_net)

    # Logging system
    logger = get_logger()

    criterion = nn.BCELoss()

    print('Total params: %.2fM' % ((sum(p.numel() for p in g_net.parameters()) +
                                    sum(p.numel() for p in d_net.parameters())) / 1000000.0))
    print("start training: ", datetime.now())
    start_epoch = 0

    # Fixed latent batch reused every epoch so sample grids are comparable.
    # BUG FIX: use config.device instead of unconditional .cuda() so the
    # script also runs on CPU-only machines (the deprecated Variable
    # wrapper is a no-op in modern torch and has been dropped).
    fix_noise = torch.randn(config.nrow ** 2, config.G_input_dim).view(
        -1, config.G_input_dim, 1, 1).to(config.device)
    g_losses = []
    d_losses = []
    for epoch in range(start_epoch, arg.epochs):
        prev_time = datetime.now()

        g_loss, d_loss = train(train_data, g_net, d_net, criterion, g_optimizer, d_optimizer, epoch, logger)
        now_time = datetime.now()
        time_str = count_time(prev_time, now_time)
        print("train: current (%d/%d) batch g_loss is %f d_loss is %f time "
              "is %s" % (epoch, arg.epochs,  g_loss, d_loss, time_str))

        g_losses.append(g_loss)
        d_losses.append(d_loss)

        # Per-epoch diagnostics: loss curves plus a sample grid from fix_noise.
        plot_loss(d_losses, g_losses, epoch, arg.net, arg.dataset)
        plot_sample(g_net, fix_noise, epoch, net_name=arg.net, dataset_name=arg.dataset)

        # Checkpoint every other epoch, plus a final save after the loop.
        if epoch % 2 == 0:
            save_checkpoint(arg.dataset, arg.net,  epoch, g_net, d_net)

    save_checkpoint(arg.dataset, arg.net, arg.epochs, g_net, d_net)
コード例 #10
0
    def eval_model(self, device, model, eval_loader, save_folder, save_images,
                   is_validation, plot_seg):
        """Evaluate ``model`` on ``eval_loader``.

        In validation mode, returns ``(AP, accuracy)`` from the collected
        per-sample classification scores and logs summary metrics.
        Otherwise, hands per-sample result rows to ``utils.evaluate_metrics``
        and optionally saves a visualisation for every sample.
        """
        model.eval()

        # Target (width, height) used to resize images/masks for saving.
        dsize = self.cfg.INPUT_WIDTH, self.cfg.INPUT_HEIGHT

        res = []
        predictions, ground_truths = [], []

        for data_point in eval_loader:
            image, seg_mask, seg_loss_mask, _, sample_name = data_point
            image, seg_mask = image.to(device), seg_mask.to(device)
            # Ground truth: positive iff the segmentation mask has any non-zero pixel.
            is_pos = (seg_mask.max() > 0).reshape((1, 1)).to(device).item()
            prediction, pred_seg = model(image)
            # Squash both model outputs to [0, 1] via sigmoid.
            pred_seg = nn.Sigmoid()(pred_seg)
            prediction = nn.Sigmoid()(prediction)

            prediction = prediction.item()
            image = image.detach().cpu().numpy()
            pred_seg = pred_seg.detach().cpu().numpy()
            seg_mask = seg_mask.detach().cpu().numpy()

            predictions.append(prediction)
            ground_truths.append(is_pos)
            res.append((prediction, None, None, is_pos, sample_name[0]))
            if not is_validation:
                if save_images:
                    # CHW -> HWC, resize for display, convert RGB -> BGR for OpenCV.
                    image = cv2.resize(
                        np.transpose(image[0, :, :, :], (1, 2, 0)), dsize)
                    image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
                    # pred_seg may be 4-D (NCHW) or 3-D; index accordingly.
                    pred_seg = cv2.resize(pred_seg[0, 0, :, :], dsize) if len(
                        pred_seg.shape) == 4 else cv2.resize(
                            pred_seg[0, :, :], dsize)
                    seg_mask = cv2.resize(seg_mask[0, 0, :, :], dsize)
                    if self.cfg.WEIGHTED_SEG_LOSS:
                        # Plot against the weighted loss mask when it was used in training.
                        seg_loss_mask = cv2.resize(
                            seg_loss_mask.numpy()[0, 0, :, :], dsize)
                        utils.plot_sample(sample_name[0],
                                          image,
                                          pred_seg,
                                          seg_loss_mask,
                                          save_folder,
                                          decision=prediction,
                                          plot_seg=plot_seg)
                    else:
                        utils.plot_sample(sample_name[0],
                                          image,
                                          pred_seg,
                                          seg_mask,
                                          save_folder,
                                          decision=prediction,
                                          plot_seg=plot_seg)

        if is_validation:
            metrics = utils.get_metrics(np.array(ground_truths),
                                        np.array(predictions))
            # Aggregate the FP/FN/TP/TN entries for the summary log line.
            FP, FN, TP, TN = list(
                map(sum, [
                    metrics["FP"], metrics["FN"], metrics["TP"], metrics["TN"]
                ]))
            self._log(
                f"VALIDATION || AUC={metrics['AUC']:f}, and AP={metrics['AP']:f}, with best thr={metrics['best_thr']:f} "
                f"at f-measure={metrics['best_f_measure']:.3f} and FP={FP:d}, FN={FN:d}, TOTAL SAMPLES={FP + FN + TP + TN:d}"
            )

            return metrics["AP"], metrics["accuracy"]
        else:
            utils.evaluate_metrics(res, self.run_path, self.run_name)
コード例 #11
0
ファイル: app.py プロジェクト: tarunh9/AI-Upscaling
# Upload widget: accept a single image file for upscaling.
st.subheader("Try it")
# BUG FIX: corrected user-facing typo "iamge" -> "image".
upload = st.file_uploader("Upload an image", type=["png", "jpg", "jpeg"])
# Idiom fix: compare against None with `is`, not `==`.
if upload is None:
    st.write("Please upload the file")
else:
    st.write("")
st.write("")

if st.button("Submit"):
    with st.spinner("Upscaling image..."):
        # Build the SRGAN generator and load trained weights on each click.
        model = generator()
        model.load_weights('weights/srgan/weights_final.h5')
        lr = load_image(upload)
        sr = resolve_single(model, lr)
    st.subheader("Comparison")
    st.pyplot(plot_sample(lr, sr))
    # Pixel counts (height * width) before and after upscaling.
    sp = int(sr.shape[0]*sr.shape[1])
    lp = int(lr.shape[0]*lr.shape[1])
    st.write("Original Image: ",lp,"pixels")
    st.write("Upscaled Image: ",sp,"pixels")

st.text("")

st.write("Designed and Developed by **Tarun Venkatesh H**")
# Hide Streamlit's default hamburger menu and footer chrome.
hide_streamlit_style = """
            <style>
            #MainMenu {visibility: hidden;}
            footer {visibility: hidden;}
            </style>
            """
st.markdown(hide_streamlit_style, unsafe_allow_html=True)
コード例 #12
0
    X, _ = load2d(ftestf, test=True)
    y_pred = net.predict(X)

    # try to display the estimated landmarks on images
    paths = loadCSV(ftestf)
    fileNames = extract_fileNames(paths)

    for i in range(len(y_pred)):
        predi = y_pred[i]
        #filename = FSAVEFOLDER + FNAMES[i]
        write_file(flandmarks, predi)
        #write_file(filename,"\n")
        saveImg = FSAVEIMAGES + fileNames[i]
        fig = pyplot.figure()
        ax = fig.add_subplot(1, 1, 1, xticks=[], yticks=[])
        plot_sample(X[i], predi, ax)
        fig.savefig(saveImg)
        pyplot.close(fig)

    print('Finish!')
'''
# plot the landmarks on the images
fig = pyplot.figure(figsize=(4, 4))
fig.subplots_adjust(
    left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

for i in range(16):
    ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
    plot_sample(X[i], y_pred[i], ax)

pyplot.show()
コード例 #13
0
ファイル: demo.py プロジェクト: Jorgetr88/EDSR_project
def resolve_and_plot(lr_image_path):
    """Load a low-resolution image, super-resolve it, and plot the pair."""
    low_res = load_image(lr_image_path)
    high_res = resolve_single(model, low_res)
    plot_sample(low_res, high_res)
コード例 #14
0
	X1,y1 = load2d(FTRAINF,test=False)

	#=================================================================
	# Load the parameters into list of layer, create a new network and train		
	newlayers = set_weights(FMODEL)
	net2 = build_fine_tuning_model(newlayers)
	net2.fit(X1,y1)
	
    # Save the fine-tuning model
	sys.setrecursionlimit(150000)
	with open('/data3/linhlv/2018/saveModels/cnnmodel_all_10000_pronotum_fine_tune_v19.pickle','wb') as f:
		pickle.dump(net2,f,-1)

	# draw the loss
	draw_loss(net2)

	# test the fine-tuning network and draw the results
	X, _ = load2d(FTESTF,test=True)
	y_pred = net2.predict(X)

	fig = pyplot.figure(figsize=(4, 4))
	fig.subplots_adjust(
    		left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)
	for i in range(16):
    		ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
    		plot_sample(X[i], y_pred[i], ax)
	pyplot.show()
	
	
	
コード例 #15
0
    # plot samples
    X, _ = load(isTrain=False)
    y_pred = net1.predict(X)

    fig = pyplot.figure(figsize=(6, 6))
    fig.subplots_adjust(
        left=0, right=1, bottom=0, top=1, hspace=0.05, wspace=0.05)

    for i in range(16):
        ax = fig.add_subplot(4, 4, i + 1, xticks=[], yticks=[])
        plot_sample(X[i], y_pred[i], ax)

    pyplot.show()






if __name__ == '__main__':
    # Load the training data and report its range before fitting.
    X, y = load()
    print("X.shape == {}; X.min == {:.3f}; X.max == {:.3f}".format(
        X.shape, X.min(), X.max()))
    print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(
        y.shape, y.min(), y.max()))

    net1.fit(X, y)
    test()
    # NOTE(review): plot_sample() is called here with no arguments, while
    # other call sites pass (image, prediction, axis) -- confirm the
    # intended signature; this line may raise TypeError at runtime.
    plot_sample()