Example #1
class LSTM:
    def __init__(self, args, data, tuning):
        self.FLAGS = args
        self.data = data
        self.tuning = tuning
        self.embedding_init = embed(self.data)

        self.model = LSTMClassifier(self.FLAGS, self.embedding_init)
        logits = self.model.inference()
        self.train_loss = self.model.loss(logits)
        self.train_op = self.model.training(self.train_loss[0])

        pred = self.model.inference(forward_only=True)
        self.test_loss = self.model.loss(pred, forward_only=True)

        # Visualizing loss function and accuracy during training over epochs
        self.plotter = LossAccPlotter(title="Training plots",
                                      save_to_filepath="../img/lstm_plot.png",
                                      show_regressions=False,
                                      show_averages=False,
                                      show_loss_plot=True,
                                      show_acc_plot=True,
                                      show_plot_window=True,
                                      x_label="Epoch")

        self.init = tf.group(tf.global_variables_initializer(),
                             tf.local_variables_initializer())
        print("Network initialized..")
Example #2
def plot_loss_acc_history(fit_history, optimizer_name):

    plotter = LossAccPlotter(
        title=optimizer_name + ': Loss and Accuracy Performance',
        save_to_filepath='loss_acc_plots/' + optimizer_name + '.png',
        show_regressions=True,
        show_averages=False,
        show_loss_plot=True,
        show_acc_plot=True,
        show_plot_window=True,
        x_label="Epoch")

    num_epochs = len(fit_history['acc'])

    for epoch in range(num_epochs):

        acc_train = fit_history['acc'][epoch]
        loss_train = fit_history['loss'][epoch]

        acc_val = fit_history['val_acc'][epoch]
        loss_val = fit_history['val_loss'][epoch]

        plotter.add_values(epoch,
                           loss_train=loss_train,
                           acc_train=acc_train,
                           loss_val=loss_val,
                           acc_val=acc_val,
                           redraw=False)

    plotter.redraw()
    plotter.block()
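A minimal, hypothetical way to drive this function is with the history dict produced by Keras' model.fit; the key names ('acc'/'val_acc') below assume a pre-TF2 Keras, which uses those spellings (newer versions use 'accuracy'/'val_accuracy').

# Hypothetical usage sketch (values and optimizer name are made up):
fit_history = {
    'acc':      [0.61, 0.74, 0.81],
    'loss':     [0.95, 0.62, 0.44],
    'val_acc':  [0.58, 0.70, 0.76],
    'val_loss': [1.01, 0.71, 0.55],
}
plot_loss_acc_history(fit_history, optimizer_name='adam')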
Example #3
def plot_losses(data, suffix, fi_len, batch_per_epoch, **kwargs):
    # plot_data = {'X': [], 'Y': [], 'legend': []}
    other_loss = OrderedDict()

    # plot settings
    save_to_filepath = os.path.join("{}_log".format(data),
                                    "{}_plot_losses.png".format(suffix))
    plotter = LossAccPlotter(title="{} loss over time".format(suffix),
                             save_to_filepath=save_to_filepath,
                             show_regressions=False,
                             show_averages=False,
                             show_other_loss=True,
                             show_log_loss=True,
                             show_loss_plot=True,
                             show_err_plot=True,
                             show_plot_window=False,
                             epo_max=1000,
                             x_label="Epoch")
    ## load loss data
    log_path = kwargs["log_path"]
    log_file2 = open(log_path, "r")
    st_time = time.time()
    i = 0
    for li in log_file2:
        li_or = li.split(" | ")
        if len(li_or) == 1:
            continue
        iteration = li_or[0].split("\t")[0][1:]
        loss_train = str2flo(li_or[0].split(":")[1].split(",")[0])
        err_train = str2flo(li_or[0].split(",")[1])
        loss_val = str2flo(li_or[1].split(":")[1].split(",")[0])
        err_val = str2flo(li_or[1].split(",")[1])
        for li2 in li_or:
            if "loss" not in li2:
                continue
            # pdb.set_trace()
            key = li2.split(": ")[0]
            value = str2flo(li2.split(": ")[1])
            if key == 'vi loss':
                value *= 1e-2
            other_loss[key] = value

        float_epoch = str2flo(iteration) / batch_per_epoch
        plotter.add_values(float_epoch,
                           loss_train=loss_train,
                           loss_val=loss_val,
                           err_train=err_train,
                           err_val=err_val,
                           redraw=False,
                           other_loss=other_loss)
        i += 1
        time_str = "{}\r".format(
            calculate_remaining(st_time, time.time(), i, fi_len))
        # print(time_string, end = '\r')
        sys.stdout.write(time_str)
        sys.stdout.flush()
    sys.stdout.write("\n")
    sys.stdout.flush()
    log_file2.close()
    plotter.redraw()  # save as image
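str2flo and calculate_remaining are project helpers that this example does not show; the stand-ins below are assumptions inferred from how they are called, not the project's actual implementations.

# Hypothetical stand-ins for the undefined helpers used above.
def str2flo(s):
    # Parse one numeric field from a log line, tolerating stray whitespace.
    return float(s.strip())

def calculate_remaining(start_time, now, done, total):
    # Estimate remaining wall-clock time from the average per-item duration.
    per_item = (now - start_time) / max(done, 1)
    return "ETA: {:.1f}s".format(per_item * (total - done))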
Example #4
def plotResults(title, a_train, c_train, a_val, c_val):
    plotter = LossAccPlotter(title=title,
                             show_averages=False,
                             save_to_filepath=plot_loc +
                             "lossAcc_{}.png".format(title),
                             show_plot_window=show_not_save)

    for e in range(len(a_train)):
        plotter.add_values(e,
                           loss_train=c_train[e],
                           acc_train=a_train[e],
                           loss_val=c_val[e],
                           acc_val=a_val[e],
                           redraw=False)

    plotter.redraw()
    plotter.block()
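plot_loc and show_not_save are module-level globals in the source project; the call below is a hypothetical usage with made-up values.

# Hypothetical usage (plot_loc / show_not_save values are assumptions):
plot_loc = "./plots/"
show_not_save = False  # save the PNG rather than opening a plot window
plotResults("baseline",
            a_train=[0.60, 0.72, 0.81], c_train=[0.95, 0.61, 0.43],
            a_val=[0.55, 0.66, 0.71], c_val=[1.02, 0.72, 0.56])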
Example #5
def plot_loss_err(data, suffix, fi_len, ema, batch_per_epoch, **kwargs):
    ## load loss data
    log_path = kwargs["log_path"]
    log_file2 = open(log_path, "r")
    st_time = time.time()

    # plot settings
    save_to_filepath = os.path.join("{}_log".format(data),
                                    "{}_plot_loss_err.png".format(suffix))
    plotter = LossAccPlotter(title="{} loss over time".format(suffix),
                             save_to_filepath=save_to_filepath,
                             show_regressions=True,
                             show_averages=True,
                             show_loss_plot=True,
                             show_err_plot=True,
                             show_ema_plot=ema,
                             show_plot_window=False,
                             x_label="Epoch")
    i = 0
    for li in log_file2:
        li_or = li.split(" | ")
        if len(li_or) == 1:
            continue
        iteration = li_or[0].split("\t")[0][1:]
        loss_train = str2flo(li_or[0].split(":")[1].split(",")[0])
        err_train = str2flo(li_or[0].split(",")[1])
        loss_val = str2flo(li_or[1].split(":")[1].split(",")[0])
        err_val = str2flo(li_or[1].split(",")[1])
        ema_err_train = ema_err_val = None
        if ema:
            ema_err_train = li_or[3].split(":")[1].split(",")[0]
            ema_err_val = li_or[3].split(",")[1]
            if "None" not in ema_err_train:
                ema_err_train = str2flo(ema_err_train)
                ema_err_val = str2flo(ema_err_val)
            else:
                ema_err_train = ema_err_val = None

        float_epoch = str2flo(iteration) / batch_per_epoch
        plotter.add_values(float_epoch,
                           loss_train=loss_train,
                           loss_val=loss_val,
                           err_train=err_train,
                           err_val=err_val,
                           ema_err_train=ema_err_train,
                           ema_err_val=ema_err_val,
                           redraw=False)
        i += 1
        time_str = "{}\r".format(
            calculate_remaining(st_time, time.time(), i, fi_len))
        sys.stdout.write(time_str)
        sys.stdout.flush()
    sys.stdout.write("\n")
    log_file2.close()
    plotter.redraw()  # save as image
Example #6
def train(instrument, batch_size, latent_dim, epochs, mode=None):
    data_loader = load_data(instrument, batch_size)

    generator = Generator(latent_dim)
    generator.apply(weights_init)
    discriminator = Discriminator()
    discriminator.apply(weights_init)

    # print("Generator's state_dict:")
    # for param_tensor in generator.state_dict():
    #     print(param_tensor, "\t", generator.state_dict()[param_tensor].size())

    # print("Discriminator's state_dict:")
    # for param_tensor in discriminator.state_dict():
    #     print(param_tensor, "\t", discriminator.state_dict()[param_tensor].size())

    g_optimizer = optim.Adam(generator.parameters(),
                             lr=0.002,
                             betas=(0.5, 0.999))
    d_optimizer = optim.Adam(discriminator.parameters(),
                             lr=0.002,
                             betas=(0.5, 0.999))

    loss = nn.BCELoss()

    if not os.path.exists("./plots/" + instrument):
        os.makedirs("./plots/" + instrument)

    plotter = LossAccPlotter(save_to_filepath="./loss/" + instrument +
                             "/loss.png",
                             show_regressions=False,
                             show_acc_plot=False,
                             show_averages=False,
                             show_plot_window=True,
                             x_label="Epoch")

    if not os.path.exists("./model/" + instrument):
        os.makedirs("./model/" + instrument)
    if not os.path.exists("./loss/" + instrument):
        os.makedirs("./loss/" + instrument)

    epoch = 0
    d_loss_list = []
    g_loss_list = []
    while True:
        for num_batch, real_data in enumerate(data_loader):

            ############################
            # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
            ###########################
            for _ in range(1):  # one discriminator update per generator step
                real_data_d = next(iter(data_loader))
                size = real_data_d.size(0)
                real_data_t = torch.ones(size, 239, 4)
                for i in range(size):
                    for j in range(239):
                        real_data_t[i][j] = torch.log(
                            real_data_d[i, j + 1, :] / real_data_d[i, j, 3])

                y_real = Variable(torch.ones(size, 1, 1))
                y_fake = Variable(torch.zeros(size, 1, 1))
                real_data = Variable(real_data_t.float())
                fake_data = Variable(
                    torch.from_numpy(
                        np.random.normal(0, 0.2,
                                         (size, latent_dim, 1))).float())
                fake_gen = generator(fake_data).detach()

                prediction_real = discriminator(real_data)
                loss_real = loss(prediction_real, y_real)
                prediction_fake = discriminator(fake_gen)
                loss_fake = loss(prediction_fake, y_fake)
                d_loss = loss_real + loss_fake

                d_optimizer.zero_grad()
                d_loss.backward()
                d_optimizer.step()

            ############################
            # (2) Update G network: maximize log(D(G(z)))
            ###########################
            size = real_data.size(0)
            y_real = Variable(torch.ones(size, 1, 1))
            fake_data = Variable(
                torch.from_numpy(
                    np.random.normal(0, 0.2, (size, latent_dim, 1))).float())
            fake_gen = generator(fake_data)
            prediction = discriminator(fake_gen)
            g_loss = loss(prediction, y_real)

            g_optimizer.zero_grad()
            g_loss.backward()
            g_optimizer.step()

            if num_batch % 20 == 0:
                tmp = torch.ones(size, 239, 4)
                data = real_data if num_batch == 0 else fake_gen
                for batch in range(size):
                    for t in range(238, 0, -1):
                        data[batch, t, 0] = torch.sum(
                            data[batch, 0:t, 3]) + data[batch, t, 0]
                        data[batch, t, 1] = torch.sum(
                            data[batch, 0:t, 3]) + data[batch, t, 1]
                        data[batch, t, 2] = torch.sum(
                            data[batch, 0:t, 3]) + data[batch, t, 2]
                        data[batch, t, 3] = torch.sum(data[batch, 0:t + 1, 3])
                data = torch.exp(data)
                tmp = tmp * data
                print("epoch: %d, num_batch: %d, d-loss: %.4f, g-loss: %.4f" %
                      (epoch, num_batch, d_loss.data.numpy(),
                       g_loss.data.numpy()))
                visualize(instrument, tmp, epoch, num_batch)

            plotter.add_values(epoch,
                               loss_train=g_loss.item(),
                               loss_val=d_loss.item())
            d_loss_list.append(d_loss.item())
            g_loss_list.append(g_loss.item())

        if epoch % 10 == 0:
            torch.save(
                generator, "./model/" + instrument + "/generator_epoch_" +
                str(epoch) + ".model")
            torch.save(
                discriminator, "./model/" + instrument +
                "/discriminator_epoch_" + str(epoch) + ".model")

            d_loss_np = np.array(d_loss_list)
            np.save(
                "./loss/" + instrument + "/d_loss_epoch_" + str(epoch) +
                ".npy", d_loss_np)
            g_loss_np = np.array(g_loss_list)
            np.save(
                "./loss/" + instrument + "/g_loss_epoch_" + str(epoch) +
                ".npy", g_loss_np)

        epoch += 1
        if mode == "test" and epoch == epochs:
            break
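The nested loops that fill real_data_t compute the log return of every price column at step t+1 against column 3 (presumably the close) at step t. Assuming real_data_d is a (batch, 240, 4) tensor, the same transform vectorizes to a one-liner:

import torch

# Vectorized equivalent of the log-return loops above (a sketch; assumes
# real_data_d is a (batch, 240, 4) tensor whose column 3 is the close).
def log_returns(real_data_d):
    return torch.log(real_data_d[:, 1:, :] / real_data_d[:, :-1, 3:4])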
Example #7
    "500"  : 1e-4,
    "10000": 1e-5,
    "20000": 1e-6 
}

lr_decay_power = 4
momentum = 0.9
weight_decay = 0.005
n_classes = 3
n_boxes = 5

start = time.time()
plotter = LossAccPlotter(title="YOLOv2 loss",
                         save_to_filepath="plot.png",
                         show_regressions=True,
                         show_averages=True,
                         show_loss_plot=True,
                         show_acc_plot=False,
                         show_plot_window=True,
                         x_label="Batch")

# load image generator
#print("loading image generator...")
#generator = ImageGenerator(item_path, background_path)
print("loading ImageNet generator")
imageNet_data = ImageNet_data("./XmlToTxt/water_bottle_img", "./XmlToTxt/water_bottle_bbox", "./XmlToTxt/images_list.txt", n_classes)

# load model
print("loading initial model...")
yolov2 = YOLOv2(n_classes=n_classes, n_boxes=n_boxes)
model = YOLOv2Predictor(yolov2)
serializers.load_hdf5(initial_weight_file, model)
Example #8
from laplotter import LossAccPlotter
import numpy as np

loss_train = [1.78287,29.1449,7.78841e-07,13.6862,6.53347,0,20.7715,13.6161,22.2351,0.738925,13.0908,0.777441,0.0472846,5.02116,13.1627,18.9485,0.210569,11.624,2.08745,0.0386455,2.96811,3.9002,12.5514,1.50999e-07,8.83681,0.614701,4.8374,2.94422,5.65934,3.79716,2.42393e-07,3.76043,11.6713,19.8563,2.29324,9.05517,2.73297,0.315229,9.68727,6.09771,7.70933,1.86227,3.1735,0.521857,0.00618037,6.86501,6.27954,14.0736,0.000924774,9.39426,0.769922,0.0245002,1.96274,4.7008,6.90015,8.01472e-05,6.67905,1.29383,1.29645,1.74596,4.9427,6.84285,0.333404,4.63594,5.06641,15.5398,2.11646,5.02996,4.77384,0.979819,4.49098,7.96129,4.60795,1.99647,1.46992,0.838725,0.406617,4.2825,5.30763,13.3822,0.679725,6.00381,1.31804,1.93113e-05,1.47814,7.09132,6.39708,0.0904769,3.79986,2.07594,4.28488e-05,1.09619,3.30311,5.84919,6.51683e-07,5.58052,1.29333,13.7187,1.89587,4.39399,2.13659,0.00336869,3.21854,1.88764,5.14016,0.896579,1.84339,1.04903,0.000184667,5.43554,3.35623,12.1924,0.385686,9.45011,1.85467,0.0246067,0.831138,6.914,6.23844,0.795699,1.95747,0.68692,0.845176,1.0877,3.0746,8.70655,0.00318328,4.42509,0.29063,5.10376,0.989006,2.7661,0.830882,0.000447012,4.70886,5.90386,5.171,0.329779,1.71858,2.65224,0.0874849,5.12659,6.53526,8.14225,0.289866,6.99529,1.06769,0.429264,3.2046,3.89446,3.86922,0.525132,0.974393,0.388117,0.0872195,1.61082,3.31041,4.96678,2.1021e-06,6.17585,0.433333,1.33485,2.73664,1.35853,1.71845,1.46236e-05,2.19909,8.86633,12.8545,0.482935,4.31387,1.73655,0.000135461,4.80087,3.72879,6.01963,0.697551,0.589483,1.58111,1.24803,0.380378,1.94606,2.44027,0.332729,0.58631,1.66398,1.51738,0.381653,1.24591,1.21926,0.089968,0.137374,1.49212,0.414554,1.49662,1.86038,0.586595,0.479933,0.0956203,2.13886,4.08323,0.97873,5.89219,3.0324,2.33401,0.915041,1.16065,0.654674,2.64473,1.67508,2.13872,2.07879,1.037,1.61363,3.01924,0.00194727,0.213232,0.990046,1.19904,0.647825,2.37541,2.722,0.868884,0.498483,1.77989,0.176762,0.312136,1.8974,0.0906598,0.87929,0.000377702,3.5025,2.83095,0.756712,3.14806,1.67057,1.04358,0.0455291,1.58032,0.451919,0.70461,0.853231,1.32766,0.306014,0.0767751,1.2256,3.54072,0.125499,1.10718,1.34228,0.00331049,1.22306,1.02556,1.12424,0.730394,0.000257952,2.43206,0.320216,0.00270821,1.38399,1.13758,0.64275,0.304758,0.425686,1.60879,2.12142,2.41066,1.52521,1.14838,0.618358,1.23101,0.211262,0.401648,4.50295,1.65174,0.887649,0.564754,0.714206,0.780962,1.40717,1.56299,1.00119,0.5446,0.309131,2.22433,0.346811,1.47716,0.125209,1.53101,0.985111,0.162529,1.16698,1.59133,0.423444,0.249217,1.22514,0.15732,1.65533,0.683983,0.313613,0.000591039,0.752796,2.10528,0.359425,0.798952,2.03783,2.00352,0.838242,2.56902,0.731644,0.999082,1.72261,1.6797,0.316591,1.43042,0.481146,1.16165,1.44683,1.09819,0.282121,1.14727,0.982391,1.25662,1.93189,1.45891,0.00163681,0.0745168,1.0052,0.106571,1.25017,0.732631,0.272068,0.521974,0.153365,1.46922,0.216946,0.993699,4.56478,1.23891,0.54856,1.1685,2.60148,0.901606,0.762153,0.673157,0.661107,0.747385,0.583753,1.78587,2.27985,0.537414,0.519801,1.3492,1.27548,0.854889,1.06354,2.40819,0.0574775,0.351584,0.94144,0.277534,0.487703,1.59348,0.273829,0.208106,0.0106441,2.37391,2.91178,2.06921,2.95586,1.41766,1.6503,0.569348,1.00752,0.000979348,0.962174,1.35048,0.720073,0.849725,0.281346,0.397568,2.12944,0.184289,0.210439,0.167841,0.123331,0.523199,1.53868,2.51282,0.220077,0.138104,0.349864,0.346058,0.0392949,1.60673,0.736784,0.365483,0.0697581,2.10601,1.64587,0.284739,2.53066,0.317422,0.39104,0.837833,1.14931,0.00133174,0.364254,1.47253,1.49312,0.970698,0.148329,0.710574,1.1011,0.103337,2.04584
,0.840574,0.738854,1.38096,0.886936,2.68339,1.03406,0.561051,1.037,0.180332,0.0567668,0.405977,2.51247,2.46447,0.266659,1.18302,1.21353,1.54037,0.431412,0.585637,0.470308,0.822154,1.09625,1.08513,0.924982,0.730777,0.0290513,2.8241,1.0741,0.0278113,1.62353,1.72484,0.291436,0.0615084,0.0688936,0.109038,0.426259,0.676662,0.0296513,1.46585,0.614088,0.818202,0.623282,0.7245,2.95407,0.801136,0.383492,0.82544,1.04582,1.31102,1.3451,1.89279,0.466176,0.880792,0.567139,1.3591,1.19211,0.356127,0.446713,1.63157,1.74161,0.198557,0.573037,1.82283,0.181068,0.71155,0.183613,0.596359,0.56774,1.02815,0.475456,0.434795,0.303731,1.76786,0.147978,0.567042,3.59143,0.237016,1.07907,0.0650334,0.766743,0.613197,1.66673,0.436797,1.13999]
loss_val = [15.9075,7.81171,7.9644,3.49974,3.15895,4.38955,4.15697,6.17876,3.4366,2.46914,3.41426,3.23049,5.30429,3.19208,2.0779,2.56253,3.2619,5.04907,3.0515,2.65809,2.08003,2.77029,3.90597,2.83559,2.96545,1.90817,2.23167,3.83688,3.1674,3.25176,2.02111,1.89907,3.05517,2.24733,3.50986,1.36536,1.36045,1.30455,1.27106,1.29183,1.22332,1.25968,1.20373,1.21101,1.2103,1.16807,1.19542,1.14616,1.15033,1.14135,1.09643,1.12605,1.08845,1.10505,1.09303,1.07197,1.10399,1.05564,1.09022,1.05491,1.03748,1.06291,1.02575,1.04434,1.01115,1.00698,1.02612,0.975953,1.02462,0.981691,1.01163,0.996219,0.988349,0.991318,0.985924,0.985863,0.986974,0.981544,0.98296,0.979165,0.980247,0.988261,0.988905,0.982036,1.04112,1.05818,0.986896,1.00126,1.02393,0.982019,1.03436,1.07273,0.982022,0.98988,0.994627,0.982016,0.99697,0.982016,1.00086,0.982427]
accu_val = [0.509524,0.519048,0.62381,0.72619,0.769047,0.72619,0.709524,0.669048,0.692857,0.802381,0.778571,0.757143,0.704762,0.695238,0.821428,0.795238,0.740476,0.711905,0.7,0.795238,0.828571,0.769048,0.757143,0.723809,0.780953,0.82619,0.816667,0.761905,0.72381,0.773809,0.807143,0.838095,0.795238,0.802381,0.771429,0.864286,0.861905,0.869048,0.869048,0.866667,0.871429,0.871429,0.871429,0.871429,0.869048,0.871429,0.869048,0.869048,0.871429,0.871429,0.87381,0.871429,0.873809,0.873809,0.871429,0.87381,0.873809,0.871429,0.871429,0.871429,0.87381,0.87381,0.87381,0.87381,0.87381,0.87381,0.876191,0.873809,0.876191,0.876191,0.87381,0.87381,0.878572,0.878571,0.878572,0.878571,0.878571,0.87619,0.876191,0.876191,0.87381,0.87381,0.87381,0.878571,0.871429,0.87381,0.876191,0.87619,0.876191,0.878571,0.87619,0.87381,0.878571,0.876191,0.873809,0.878571,0.873809,0.878571,0.87619,0.878571]

plotter = LossAccPlotter(
			title="Text/No-Text classifier loss and accuracy graph",
			save_to_filepath="./cnn_acc_loss.png",
			show_regressions=False,
			show_averages=True,
			show_loss_plot=True,
			show_acc_plot=True,
			show_plot_window=False,
			x_label="Iteration Count")

# add them all
for iteration in range(20000):
	if iteration % 40 == 0:
		# deactivate redrawing after each update
		plotter.add_values(iteration, loss_train=loss_train[iteration // 40], redraw=False)

	if iteration % 200 == 0:
		# deactivate redrawing after each update
		plotter.add_values(iteration, loss_val=loss_val[iteration // 200], acc_val=accu_val[iteration // 200], redraw=False)

# redraw once at the end
plotter.redraw()

plotter.block()
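Since training values arrive every 40 iterations and validation values every 200 (the intervals assumed in the loop above), an equivalent sketch can skip the modulo checks and walk the stored arrays directly:

# Equivalent sketch: reconstruct the iteration index from the array index.
for k, lt in enumerate(loss_train):
    plotter.add_values(k * 40, loss_train=lt, redraw=False)
for k, (lv, av) in enumerate(zip(loss_val, accu_val)):
    plotter.add_values(k * 200, loss_val=lv, acc_val=av, redraw=False)
plotter.redraw()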
Example #9
def main():
    rand.seed()
    #env = gym.make('Asteroids-v0')
    env = gym.make('Breakout-v0')
    num_actions = env.action_space.n
    #print(num_actions)
    
    plotter = LossAccPlotter(title="mem256_perGame", show_acc_plot=True,
                             save_to_filepath="./mem256_perGame.png", show_loss_plot=False)
    plotter2 = LossAccPlotter(title="mem256_100", show_acc_plot=True,
                              save_to_filepath="./mem256_100.png", show_loss_plot=False)
    #plotter.save_plot("./mem256.png")
    
    observation = env.reset()
    #observation = downsample(observation)
    #reward = 0
    action = 0
    total_reward = 0
    total_reward2 = 0
    env.render()
    prev_obs = []
    curr_obs = []
    D = []
    step = 0
    rate = 1
    sess, output_net, x, cost, trainer, mask, reward, nextQ = initialize()
    #load(sess)
    startPrinting = False
    for i in range(5):
        observation, rw, done, info = env.step(action)  # pass in 0 for action
        observation = convert_to_small_and_grayscale(observation)
        prev_obs = deepcopy(curr_obs)
        curr_obs = obsUpdate(curr_obs,observation)
        #e = [rw, action, deepcopy(prev_obs), deepcopy(curr_obs)]
        #D.append(e)
        action = 1
        #print(i)
    print("Entering mini-loop")
    for _ in range(10):
        step +=1
        #print(step)
        if done:
            observation = env.reset()
        if (len(D) > 256):
            D.pop()
        if step % 1000 == 0:
            rate = rate / 2
            if (rate < 0.05):
                rate=0.05
        #if step % 1000 == 0:
            #save(sess)
        action = magic(curr_obs, sess, output_net, x,step,rate, False) #change this to just take in curr_obs, sess, and False
        #action = env.action_space.sample()
        env.render()
        observation, rw, done, info = env.step(action) # take a random action
        #print(action, rw, step)
        observation = convert_to_small_and_grayscale(observation)
        e = [rw, action, deepcopy(prev_obs), deepcopy(curr_obs)]
        D.append(e)
        prev_obs = deepcopy(curr_obs)
        curr_obs = obsUpdate(curr_obs,observation)
    print("Entering full loop")
    while step < 10001:
        step +=1
        
        #print(step)
        if done:
            print("saving to plot....")
            plot_reward = total_reward
            plotter.add_values(step, acc_train=plot_reward)
            total_reward = 0
            observation = env.reset()
        if (len(D) > 256):
            D.pop()
        if step % 100 == 0:
            print(step,"steps have passed")
            save(sess, step)
            rate = rate / 2
            startPrinting = True
            if (rate < 0.05):
                rate=0.05
            print("saving to plot2....")
            plot_reward = total_reward2/100
            plotter2.add_values(step, acc_train=plot_reward)
            total_reward2 = 0
            #print(step,"steps have passed")
        if step % 500 == 0:
            plotter.save_plot("./mem256_perGame.png")
            plotter2.save_plot("./mem256_100.png")
        action = magic(curr_obs, sess, output_net, x,step,rate, startPrinting) #change this to just take in curr_obs, sess, and False
        #action = env.action_space.sample()
        env.render()
        observation, rw, done, info = env.step(action) # take a random action
        #print(action, rw, step)
        observation = convert_to_small_and_grayscale(observation)
        e = [rw, action, deepcopy(prev_obs), deepcopy(curr_obs)]
        D.insert(0,e)
        prev_obs = deepcopy(curr_obs)
        curr_obs = obsUpdate(curr_obs,observation)
        update_q_function(D, sess, output_net, x, cost, trainer, mask, reward, nextQ)
        total_reward = total_reward + rw
        total_reward2 = total_reward2 + rw
    plotter.block()
    plotter2.block()
Example #10
def train_model(num_tasks,
                models,
                dataloaders,
                dataset_sizes,
                criterion,
                optimizers,
                schedulers,
                epochs=15):
    since = time.time()
    num_tasks = num_tasks
    use_gpu = torch.cuda.is_available()
    final_outputs = [
    ]  #this is the variable to accumulate the outputs of all columns for each task
    middle_outputs = [
    ]  #this is the variable for keeping outputs of current task in each column

    #we iterate for each task
    for task_id in range(num_tasks):
        #everytime we do a new task, we empty the final outputs
        final_outputs[:] = []

        # we now iterate for each previous column until the one of our task
        for i in range(0, task_id + 1):
            #we save the weights with best results
            best_model_wts = copy.deepcopy(models[task_id][i].state_dict())
            model = models[task_id][i]
            optimizer = optimizers[i]
            scheduler = schedulers[i]
            # if it's not the column corresponding to the task, do not train
            if task_id != i:
                #this is the case for "previous" columns so we only need to pass data, not train
                num_epochs = 1
                middle_outputs[:] = []
            else:
                num_epochs = epochs
            dataloader = dataloaders[i]
            dataset_size = dataset_sizes[i]
            best_acc = 0.0
            # let's add a plotter for loss and accuracy
            plotter = LossAccPlotter()
            for epoch in range(num_epochs):
                print('Epoch {}/{}'.format(epoch, num_epochs - 1))
                print('-' * 10)
                # Each epoch has a training and validation phase
                for phase in ['train', 'val']:
                    if phase == 'train':
                        scheduler.step()
                        model.train(True)  # Set model to training mode
                    else:
                        model.train(False)  # Set model to evaluate mode

                    running_loss = 0.0
                    running_corrects = 0

                    # Iterate over data.
                    #tqdm shows a progression bar
                    for data in tqdm(dataloader[phase]):
                        # get the inputs
                        inputs, labels = data
                        if inputs.type() != int and labels.type() != int:
                            # wrap them in Variable
                            if use_gpu:
                                inputs = Variable(inputs.cuda())
                                labels = Variable(labels.cuda())

                            else:
                                inputs, labels = Variable(inputs), Variable(
                                    labels)

                            # zero the parameter gradients
                            optimizer.zero_grad()

                            # forward
                            outputs, middle_outputs = model(
                                inputs, final_outputs)
                            #we save the outputs of this column in middle outputs and we have previous columns in final
                            _, preds = torch.max(outputs.data, 1)
                            loss = criterion(outputs, labels)

                            # backward + optimize only if in training phase
                            if phase == 'train':
                                loss.backward()
                                optimizer.step()

                            # statistics
                            running_loss += loss.data[0] * inputs.size(0)
                            running_corrects += torch.sum(preds == labels.data)

                    epoch_loss = running_loss / dataset_size[phase]
                    epoch_acc = running_corrects / dataset_size[phase]

                    print('{} Loss: {:.4f} Acc: {:.4f}'.format(
                        phase, epoch_loss, epoch_acc))

                    if phase == 'train':
                        plotter.add_values(epoch,
                                           loss_train=epoch_loss,
                                           acc_train=epoch_acc,
                                           redraw=False)
                    else:
                        plotter.add_values(epoch,
                                           loss_val=epoch_loss,
                                           acc_val=epoch_acc,
                                           redraw=False)
                    # deep copy the model
                    if phase == 'val' and epoch_acc > best_acc:
                        best_acc = epoch_acc
                        best_model_wts = copy.deepcopy(model.state_dict())

                print()
                plotter.redraw()
            if task_id != i:
                final_outputs.append(middle_outputs)
                #we add the ouput of this column to final outputs

        plotter.save_plot('plots%d.%d.png' % (task_id, i))
        print('Best val Acc: {:4f}'.format(best_acc))

        # load best model weights
        models[task_id][i].load_state_dict(best_model_wts)

    time_elapsed = time.time() - since
    print('Training complete in {:.0f}m {:.0f}s'.format(
        time_elapsed // 60, time_elapsed % 60))

    return models
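The statistics lines use loss.data[0], which only works on PyTorch 0.3 and earlier; from 0.4 on, scalar tensors expose .item(). A sketch under that version assumption:

import torch

# Sketch for PyTorch >= 0.4: .item() replaces the loss.data[0] indexing
# used in the statistics step above (assumes a scalar loss tensor).
def batch_stats(loss, preds, labels, inputs):
    running_loss = loss.item() * inputs.size(0)
    running_corrects = torch.sum(preds == labels).item()
    return running_loss, running_corrects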
Example #11
def main():
    """Run various checks on the LossAccPlotter.
    They all follow the same pattern: Generate some random data (lines)
    to display. Then display them (using various settings).
    """
    print("")
    print("------------------")
    print("1 datapoint")
    print("------------------")
    # generate example values for: loss train, loss validation, accuracy train
    # and accuracy validation
    (loss_train, loss_val, acc_train, acc_val) = create_values(1)

    # generate a plot showing the example values
    show_chart(loss_train, loss_val, acc_train, acc_val,
               title="A single datapoint")

    print("")
    print("------------------")
    print("150 datapoints")
    print("Saved to file 'plot.png'")
    print("------------------")
    (loss_train, loss_val, acc_train, acc_val) = create_values(150)
    show_chart(loss_train, loss_val, acc_train, acc_val,
               lap=LossAccPlotter(save_to_filepath="plot.png"),
               title="150 datapoints, saved to file 'plot.png'")

    print("")
    print("------------------")
    print("150 datapoints")
    print("No accuracy chart")
    print("------------------")
    (loss_train, loss_val, _, _) = create_values(150)
    show_chart(loss_train, loss_val, np.array([]), np.array([]),
               lap=LossAccPlotter(show_acc_plot=False),
               title="150 datapoints, no accuracy chart")

    print("")
    print("------------------")
    print("150 datapoints")
    print("No loss chart")
    print("------------------")
    (_, _, acc_train, acc_val) = create_values(150)
    show_chart(np.array([]), np.array([]), acc_train, acc_val,
               lap=LossAccPlotter(show_loss_plot=False),
               title="150 datapoints, no loss chart")

    print("")
    print("------------------")
    print("150 datapoints")
    print("No accuracy chart")
    print("------------------")
    (loss_train, loss_val, _, _) = create_values(150)
    show_chart(loss_train, loss_val, np.array([]), np.array([]),
               lap=LossAccPlotter(show_acc_plot=False),
               title="150 datapoints, no accuracy chart")

    print("")
    print("------------------")
    print("150 datapoints")
    print("Only validation values (no training lines)")
    print("------------------")
    (_, loss_val, _, acc_val) = create_values(150)
    show_chart(np.array([]), loss_val, np.array([]), acc_val,
               title="150 datapoints, only validation (no training)")

    print("")
    print("------------------")
    print("150 datapoints")
    print("No regressions")
    print("------------------")
    (loss_train, loss_val, acc_train, acc_val) = create_values(150)
    show_chart(loss_train, loss_val, acc_train, acc_val,
               lap=LossAccPlotter(show_regressions=False),
               title="150 datapoints, regressions deactivated")

    print("")
    print("------------------")
    print("150 datapoints")
    print("No averages")
    print("------------------")
    (loss_train, loss_val, acc_train, acc_val) = create_values(150)
    show_chart(loss_train, loss_val, acc_train, acc_val,
               lap=LossAccPlotter(show_averages=False),
               title="150 datapoints, averages deactivated")

    print("")
    print("------------------")
    print("150 datapoints")
    print("x-index 5 of loss_train should create a warning as its set to NaN")
    print("------------------")
    (loss_train, loss_val, acc_train, acc_val) = create_values(150)

    # this should create a warning when LossAccPlotter.add_values() gets called.
    loss_train[5] = float("nan")

    show_chart(loss_train, loss_val, acc_train, acc_val,
               title="150 datapoints, one having value NaN (loss train at x=5)")

    print("")
    print("------------------")
    print("1000 datapoints training")
    print("100 datapoints validation")
    print("------------------")
    nb_points_train = 1000
    nb_points_val = 100
    (loss_train, loss_val, acc_train, acc_val) = create_values(nb_points_train)

    # set 9 out of 10 values of the validation arrays to -1.0 (which will be
    # interpreted as None in show_chart(). Numpy doesn't support None directly,
    # only NaN, which is already used before to check whether the Plotter
    # correctly creates a warning if any data point is NaN.)
    all_indices = np.arange(0, nb_points_train-1, 1)
    keep_indices = np.arange(0, nb_points_train-1, int(nb_points_train / nb_points_val))
    set_to_none_indices = np.delete(all_indices, keep_indices)
    loss_val[set_to_none_indices] = -1.0
    acc_val[set_to_none_indices] = -1.0

    show_chart(loss_train, loss_val, acc_train, acc_val,
               title="1000 training datapoints, but only 100 validation datapoints")

    print("")
    print("------------------")
    print("5 datapoints")
    print("slowly added, one by one")
    print("------------------")
    (loss_train, loss_val, acc_train, acc_val) = create_values(5)
    lap = LossAccPlotter(title="5 datapoints, slowly added one by one")

    for idx in range(loss_train.shape[0]):
        lap.add_values(idx,
                       loss_train=loss_train[idx], loss_val=loss_val[idx],
                       acc_train=acc_train[idx], acc_val=acc_val[idx],
                       redraw=True)
        sleep(1.0)

    print("Close the chart to continue.")
    lap.block()
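create_values and show_chart come from the project's check script and are not reproduced in this example; a plausible stand-in for create_values, assuming it returns four noisy numpy curves of the requested length, might look like this:

import numpy as np

# Hypothetical stand-in for create_values (an assumption, not the real
# helper): four noisy loss/accuracy curves of length n.
def create_values(n):
    x = np.linspace(0.0, 1.0, num=n)
    loss_train = 0.9 - 0.8 * x + np.random.normal(0, 0.03, n)
    loss_val = 0.8 - 0.5 * x + np.random.normal(0, 0.05, n)
    acc_train = 0.5 + 0.45 * x + np.random.normal(0, 0.03, n)
    acc_val = 0.5 + 0.3 * x + np.random.normal(0, 0.05, n)
    return loss_train, loss_val, acc_train, acc_val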
Example #12
    if option.is_gd():
        print("\tOptimization: Gradient Descent")
    elif option.is_sgd():
        print("\tOptimization: Stochastic Gradient Descent")

    if option.is_linear():
        print("\tActivation Function: Linear")
    elif option.is_sigmoid():
        print("\tActivation Function: Sigmoid")

    if option.is_l2norm():
        print("\tRegularization: L2Norm")
    elif option.is_dropout():
        print("\tRegularization: Drop out")

    net = Network(training, test, option)
    if read_weights_from_file == "y":
        loaded = np.load('weights.npz')
        net.set_hid_weights(loaded['hid_weights']).set_out_weights(
            loaded['out_weights']).set_hid_bias(
                loaded['hid_bias']).set_out_bias(loaded['out_bias'])

    plotter = LossAccPlotter(show_acc_plot=False)
    for i in range(NUM_OF_ITER):
        training_loss = 0
        if read_weights_from_file != "y":
            training_loss = net.train(i == NUM_OF_ITER - 1)
        test_loss = net.test(i == NUM_OF_ITER - 1)
        plotter.add_values(i, loss_train=training_loss, loss_val=test_loss)
    plotter.block()
Example #13
                train_acc.append(None)
                val_acc_top1.append(None)
                val_acc_top5.append(None)
                count_acc += 1
    print("Start plotting")
    # print(len(loss))
    # print(train_acc)
    # print(val_acc_top1)
    # print(val_acc_top5)

    plot_title = 'Loss and Accuracy Plotted Using ' + num_classes + ' Keywords (' + num_train_per_class + ' training videos each) and minibatch size of ' + batch_size
    plotter = LossAccPlotter(title=plot_title,
                             save_to_filepath=num_classes + '_' +
                             num_train_per_class + '_' + batch_size + '.png',
                             show_regressions=False,
                             show_averages=False,
                             show_loss_plot=True,
                             show_acc_plot=True,
                             show_plot_window=True,
                             x_label="Epoch")

    for epoch in range(len(loss)):
        plotter.add_values(epoch,
                           loss_train=loss[epoch],
                           acc_train=train_acc[epoch],
                           acc_val=val_acc_top1[epoch])

    plotter.block()

    print("Finished plotting")
Example #14
def main():
    """Create the example plots in the following way:
    1. Generate example data (all plots use more or less the same data)
    2. Generate plot 1: "standard" example with loss and accuracy
    3. Generate plot 2: Same as 1, but only loss / no accuracy
    4. Generate plot 3: Same as 1, but no validation lines (only training dataset)
    5. Generate plot 4: Same as 1, but use only every 10th validation datapoint
                        (i.e. resembles real world scenario where you rarely
                        validate your machine learning method)
    """
    nb_points = 500

    loss_train = add_noise(np.linspace(0.9, 0.1, num=nb_points), 0.025)
    loss_val = add_noise(np.linspace(0.7, 0.3, num=nb_points), 0.045)
    acc_train = add_noise(np.linspace(0.52, 0.95, num=nb_points), 0.025)
    acc_val = add_noise(np.linspace(0.65, 0.75, num=nb_points), 0.045)

    # Normal example plot
    lap = LossAccPlotter(save_to_filepath="example_plot.png")
    show_chart(loss_train,
               loss_val,
               acc_train,
               acc_val,
               lap=lap,
               title="Example Plot with Loss and Accuracy")

    # Plot showing only the results of the loss function (accuracy off)
    lap = LossAccPlotter(show_acc_plot=False,
                         save_to_filepath="example_plot_loss.png")
    show_chart(loss_train,
               loss_val,
               acc_train,
               acc_val,
               lap=lap,
               title="Example Plot, only Loss Function")

    # Plot showing only training dataset values (but for both loss and accuracy)
    lap = LossAccPlotter(save_to_filepath="example_plot_only_training.png")
    show_chart(
        loss_train,
        np.array([]),
        acc_train,
        np.array([]),
        lap=lap,
        title="Example Plot, only Training Dataset / no Validation Dataset")

    # Plot with a different update interval for training and validation dataset
    # (i.e. only one validation value for every 10 training values)
    #
    # Set 9 out of 10 validation values to -1, which will be transformed into
    # None in show_chart(). (same technique as in check_laplotter.py)
    nb_points_train = nb_points
    nb_points_val = int(nb_points * 0.1)
    all_indices = np.arange(0, nb_points_train - 1, 1)
    keep_indices = np.arange(0, nb_points_train - 1,
                             int(nb_points_train / nb_points_val))
    set_to_none_indices = np.delete(all_indices, keep_indices)
    loss_val[set_to_none_indices] = -1.0
    acc_val[set_to_none_indices] = -1.0
    lap = LossAccPlotter(show_acc_plot=False,
                         save_to_filepath="example_plot_update_intervals.png")
    show_chart(loss_train, loss_val, acc_train, acc_val, lap=lap,
               title="Example Plot with different Update Intervals for Training " \
                     "and Validation Datasets")
Example #15
def train(ngram, name, bar, drop_out, dataset, is_cuda=False, edges=False):
    plotter = LossAccPlotter(title="This is an example plot",
                             save_to_filepath="/tmp/my_plot.png",
                             show_regressions=True,
                             show_averages=True,
                             show_loss_plot=True,
                             show_acc_plot=True,
                             show_plot_window=False,
                             x_label="Epoch")

    print('load data helper.')
    data_helper = DataHelper(mode='train')
    b_size = len(data_helper.label)
    print('*' * 100)
    print('train set total:', b_size)

    if os.path.exists(os.path.join('.',
                                   name + '.pkl')) and name != 'temp_model':
        print('load model from file')
        model = torch.load(os.path.join('.', name + '.pkl'))

    else:
        print('new model')
        if name == 'temp_model':
            name = 'temp_model'

        edges_weights, edges_mappings, count = cal_PMI()

        ## -----------------************************** import the datahelper class to get the vocab-5 doc*****************------------------------

        ## class_num = len(data_helper.labels_str) is changed, consider just a score
        model = Model(class_num=data_helper.labels_str,
                      hidden_size_node=200,
                      vocab=data_helper.vocab,
                      n_gram=ngram,
                      drop_out=drop_out,
                      edges_matrix=edges_mappings,
                      edges_num=count,
                      trainable_edges=edges,
                      pmi=edges_weights,
                      cuda=is_cuda)
        ### --------------------------------------- ###
        print(model)

        if is_cuda:
            print('cuda')
            model.cuda()
        loss_func = torch.nn.MSELoss()
        loss_mae = torch.nn.L1Loss(reduction='sum')
        optim = torch.optim.Adam(model.parameters(), weight_decay=1e-3)
        iter = 0
        if bar:
            pbar = tqdm.tqdm(total=NUM_ITER_EVAL)

        best_acc = 0.0
        last_best_epoch = 0
        start_time = time.time()
        total_loss = 0.0
        total_correct = 0
        total = 0
        accuracy = 0.0
        num_epoch = 500
        weight_decays = 1e-4
        for content, label, epoch in data_helper.batch_iter(
                batch_size=32, num_epoch=num_epoch):
            improved = ''
            model.train()
            pred = model(content)
            pred_sq = torch.squeeze(pred, 1)
            l2_reg = 0.5 * weight_decays * (
                model.seq_edge_w.weight.to('cpu').detach().numpy()**2).sum()

            loss = loss_func(pred_sq, label.float()) + l2_reg

            #-------------------------------------------#
            error = loss_mae(pred_sq.cpu().data, label.cpu())
            accuracy += error
            total += len(pred)  ##batch size = len(label)
            total_loss += (loss.item() * len(pred))
            total_correct += loss.item()
            optim.zero_grad()
            loss.backward()
            optim.step()

            iter += 1
            if bar:
                pbar.update()

            if iter % NUM_ITER_EVAL == 0:
                if bar:
                    pbar.close()

                val_acc, val_loss = dev(model)

                if val_acc < best_acc:
                    best_acc = val_acc
                    last_best_epoch = epoch
                    improved = '* '
                    torch.save(model, name + '.pkl')

                msg = 'Epoch: {0:>6} Iter: {1:>6}, Train Loss: {5:>7.2}, Train Error: {6:>7.2}' \
                      + 'Val Acc: {2:>7.2}, Time: {3}{4}, val error:{7:>7.2}' \
                  # + ' Time: {5} {6}'

                print(
                    msg.format(epoch, iter, val_acc, get_time_dif(start_time),
                               improved, total_correct / (NUM_ITER_EVAL),
                               float(accuracy) / float(total), val_loss))

                plotter.add_values(epoch,
                                   loss_train=total_correct / (NUM_ITER_EVAL),
                                   acc_train=float(accuracy) / float(total),
                                   loss_val=val_loss,
                                   acc_val=best_acc)

                total_loss = 0.0
                total_correct = 0
                accuracy = 0.0
                total = 0
                if bar:
                    pbar = tqdm.tqdm(total=NUM_ITER_EVAL)

            plotter.block()
    return name
Example #16
class LSTM:
    def __init__(self, args, data, tuning):
        self.FLAGS = args
        self.data = data
        self.tuning = tuning
        self.embedding_init = embed(self.data)

        self.model = LSTMClassifier(self.FLAGS, self.embedding_init)
        logits = self.model.inference()
        self.train_loss = self.model.loss(logits)
        self.train_op = self.model.training(self.train_loss[0])

        pred = self.model.inference(forward_only=True)
        self.test_loss = self.model.loss(pred, forward_only=True)

        # Visualizing loss function and accuracy during training over epochs
        self.plotter = LossAccPlotter(title="Training plots",
                                      save_to_filepath="../img/lstm_plot.png",
                                      show_regressions=False,
                                      show_averages=False,
                                      show_loss_plot=True,
                                      show_acc_plot=True,
                                      show_plot_window=True,
                                      x_label="Epoch")

        self.init = tf.group(tf.global_variables_initializer(),
                             tf.local_variables_initializer())
        print("Network initialized..")

    def train_lstm(self, FLAGS, data):
        model = self.model
        scoring_list = []
        best_eval_score = []

        with tf.Session(config=tf.ConfigProto(
                allow_soft_placement=FLAGS.allow_soft_placement,
                log_device_placement=FLAGS.log_device_placement)) as sess:
            t0 = time.time()
            saver = tf.train.Saver()
            writer = tf.summary.FileWriter("../output")
            if FLAGS.restore and FLAGS.checkpoint_file:
                print()
                print("Loading model from '%s' .." % FLAGS.checkpoint_file)
                print()
                saver.restore(sess, FLAGS.checkpoint_file)
                # feed = {model.x: data.dev_x, model.y: data.dev_y, model.seq_len: data.dev_size}
                feed = {
                    model.x: data.test_x,
                    model.y: data.test_y,
                    model.seq_len: data.test_size
                }
                test_loss_value, test_score = self.final_test(sess, feed)

                return test_score
            else:
                sess.run(self.init)
                coord = tf.train.Coordinator()
                threads = tf.train.start_queue_runners(sess=sess, coord=coord)

                np.random.seed(FLAGS.random_state)
                for epoch in range(FLAGS.max_epoch):
                    data.reset_batch_pointer()

                    total_loss = 0.0
                    total_acc = 0.0
                    for step in range(data.num_batches):
                        x, y, seq_length, _, _, _, _, _ = data.next_batch()
                        feed = {
                            model.x: x,
                            model.y: y,
                            model.seq_len: seq_length
                        }
                        sess.run(self.train_op, feed)
                        current_loss, current_acc, _ = sess.run(
                            self.train_loss, feed)
                        total_loss += current_loss
                        total_acc += current_acc

                    if not self.tuning:
                        print()
                        print(
                            "Epoch {:2d}: Training loss = {:.6f}; Training Accuracy = {:.5f}"
                            .format(epoch + 1, total_loss / data.num_batches,
                                    total_acc / data.num_batches))

                    feed = {
                        model.x: data.dev_x,
                        model.y: data.dev_y,
                        model.seq_len: data.dev_size
                    }
                    dev_loss_value, dev_score, early_stop, eval_summary = self.eval(sess, feed, saver, FLAGS.early_stopping_rounds, \
                                                              scoring_list, False, FLAGS.scoring_metrics)
                    best_eval_score.append(dev_score)
                    # str_summary_type = 'train'
                    # loss_summ = tf.scalar_summary("{0}_loss".format(str_summary_type), self.total_train_loss.assign(total_loss/data.num_batches))
                    # acc_summ = tf.scalar_summary("{0}_accuracy".format(str_summary_type), self.total_train_acc.assign(total_acc/data.num_batches))
                    # train_summary = tf.merge_summary([loss_summ, acc_summ])
                    # writer.add_summary(train_summary, epoch)
                    # writer.add_summary(eval_summary, epoch)

                    self.plotter.add_values(
                        epoch,
                        loss_train=total_loss / data.num_batches,
                        acc_train=total_acc / data.num_batches,
                        loss_val=dev_loss_value,
                        acc_val=dev_score[0])

                    if early_stop:
                        print('Early stopping after %s epochs...' %
                              str(epoch))
                        best_eval_score = max(best_eval_score,key=itemgetter(1)) if FLAGS.scoring_metrics=='3classf1' else \
                                          max(best_eval_score,key=itemgetter(0)) if FLAGS.scoring_metrics=='accuracy' \
                                          else max(best_eval_score,key=itemgetter(2))
                        print(
                            "Final dev loss = {:.5f}; Dev Accuracy = {:.5f}; 3-class F1 = {:.5f}; 2-class F1 = {:.5f}"
                            .format(dev_loss_value, best_eval_score[0],
                                    best_eval_score[1], best_eval_score[2]))
                        if not self.tuning:
                            t1 = time.time()
                            print("time taken: %f mins" % ((t1 - t0) / 60))
                        break

                writer.close()
                feed = {
                    model.x: data.test_x,
                    model.y: data.test_y,
                    model.seq_len: data.test_size
                }
                test_loss_value, test_score = self.final_test(sess, feed)

                coord.request_stop()
                coord.join(threads)

        # if not FLAGS.restore:
        # 	self.plotter.block()

            return test_score, best_eval_score

    def eval(self,
             session,
             feed,
             saver,
             early_stopping_rounds,
             early_stopping_metric_list,
             early_stopping_metric_minimize=False,
             metrics='accuracy'):
        test_loss_value, acc_test, pred, eval_summary = session.run(
            self.test_loss, feed)
        f1_3class, f1_2class = fscores(self.data.dev_y, pred)
        if not self.tuning:
            print(
                "*** Validation Loss = {:.6f}; Validation Accuracy = {:.5f}; 3-class F1 = {:.5f}; 2-class F1 = {:.5f}"
                .format(test_loss_value, acc_test, f1_3class, f1_2class))
            print()
        early_stop = False
        early_stopping_score = -1
        if metrics == 'accuracy':
            early_stopping_score = acc_test
            early_stopping_metric_list.append(acc_test)
        elif metrics == '3classf1':
            early_stopping_score = f1_3class
            early_stopping_metric_list.append(f1_3class)
        elif metrics == '2classf1':
            early_stopping_score = f1_2class
            early_stopping_metric_list.append(f1_2class)
        assert early_stopping_score > 0

        if (not self.FLAGS.restore) and (early_stopping_metric_minimize
                                         ):  # For minimising the eval score
            if all(early_stopping_score <= i
                   for i in early_stopping_metric_list):
                saver.save(session, self.FLAGS.checkpoint_file)
                best_eval_score = (acc_test, f1_3class, f1_2class)
            if early_stopping_metric_list[::-1].index(
                    min(early_stopping_metric_list)) > early_stopping_rounds:
                early_stop = True
            return (test_loss_value, (acc_test, f1_3class, f1_2class),
                    early_stop, eval_summary)
        elif not (self.FLAGS.restore and early_stopping_metric_minimize
                  ):  # For maximising the eval score
            if all(early_stopping_score >= i
                   for i in early_stopping_metric_list):
                saver.save(session, self.FLAGS.checkpoint_file)
                best_eval_score = (acc_test, f1_3class, f1_2class)
            if early_stopping_metric_list[::-1].index(
                    max(early_stopping_metric_list)) > early_stopping_rounds:
                early_stop = True
            return (test_loss_value, (acc_test, f1_3class, f1_2class),
                    early_stop, eval_summary)

    def final_test(self, session, feed):
        tf.train.Saver().restore(session, self.FLAGS.checkpoint_file)
        test_loss_value, acc_test, pred, _ = session.run(self.test_loss, feed)
        true_labels = self.data.test_y
        f1_3class, f1_2class = fscores(true_labels, pred)
        print(
            "****** Final test Loss = {:.6f}; Test Accuracy = {:.5f}; 3-class F1 = {:.5f}; 2-class F1 = {:.5f}"
            .format(test_loss_value, acc_test, f1_3class, f1_2class))
        print()
        return (test_loss_value, (acc_test, f1_3class, f1_2class))
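The class above relies on module-level imports that the excerpt omits (besides project-local helpers such as embed, LSTMClassifier, and fscores). A plausible import header, inferred from usage and therefore an assumption, would be:

# Plausible import header for the excerpt above (inferred from usage:
# tf/np/time, itemgetter for the best-score selection, and the plotter).
import time
import numpy as np
import tensorflow as tf
from operator import itemgetter
from laplotter import LossAccPlotter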
Example #17
def main():
    """Run various checks on the LossAccPlotter.
    They all follow the same pattern: Generate some random data (lines)
    to display. Then display them (using various settings).
    """
    print("")
    print("------------------")
    print("1 datapoint")
    print("------------------")
    # generate example values for: loss train, loss validation, accuracy train
    # and accuracy validation
    (loss_train, loss_val, acc_train, acc_val) = create_values(1)

    # generate a plot showing the example values
    show_chart(loss_train,
               loss_val,
               acc_train,
               acc_val,
               title="A single datapoint")

    print("")
    print("------------------")
    print("150 datapoints")
    print("Saved to file 'plot.png'")
    print("------------------")
    (loss_train, loss_val, acc_train, acc_val) = create_values(150)
    show_chart(loss_train,
               loss_val,
               acc_train,
               acc_val,
               lap=LossAccPlotter(save_to_filepath="plot.png"),
               title="150 datapoints, saved to file 'plot.png'")

    print("")
    print("------------------")
    print("150 datapoints")
    print("No accuracy chart")
    print("------------------")
    (loss_train, loss_val, _, _) = create_values(150)
    show_chart(loss_train,
               loss_val,
               np.array([]),
               np.array([]),
               lap=LossAccPlotter(show_acc_plot=False),
               title="150 datapoints, no accuracy chart")

    print("")
    print("------------------")
    print("150 datapoints")
    print("No loss chart")
    print("------------------")
    (_, _, acc_train, acc_val) = create_values(150)
    show_chart(np.array([]),
               np.array([]),
               acc_train,
               acc_val,
               lap=LossAccPlotter(show_loss_plot=False),
               title="150 datapoints, no loss chart")

    print("")
    print("------------------")
    print("150 datapoints")
    print("No accuracy chart")
    print("------------------")
    (loss_train, loss_val, _, _) = create_values(150)
    show_chart(loss_train,
               loss_val,
               np.array([]),
               np.array([]),
               lap=LossAccPlotter(show_acc_plot=False),
               title="150 datapoints, no accuracy chart")

    print("")
    print("------------------")
    print("150 datapoints")
    print("Only validation values (no training lines)")
    print("------------------")
    (_, loss_val, _, acc_val) = create_values(150)
    show_chart(np.array([]),
               loss_val,
               np.array([]),
               acc_val,
               title="150 datapoints, only validation (no training)")

    print("")
    print("------------------")
    print("150 datapoints")
    print("No regressions")
    print("------------------")
    (loss_train, loss_val, acc_train, acc_val) = create_values(150)
    show_chart(loss_train,
               loss_val,
               acc_train,
               acc_val,
               lap=LossAccPlotter(show_regressions=False),
               title="150 datapoints, regressions deactivated")

    print("")
    print("------------------")
    print("150 datapoints")
    print("No averages")
    print("------------------")
    (loss_train, loss_val, acc_train, acc_val) = create_values(150)
    show_chart(loss_train,
               loss_val,
               acc_train,
               acc_val,
               lap=LossAccPlotter(show_averages=False),
               title="150 datapoints, averages deactivated")

    print("")
    print("------------------")
    print("150 datapoints")
    print("x-index 5 of loss_train should create a warning as its set to NaN")
    print("------------------")
    (loss_train, loss_val, acc_train, acc_val) = create_values(150)

    # this should create a warning when LossAccPlotter.add_values() gets called.
    loss_train[5] = float("nan")

    show_chart(
        loss_train,
        loss_val,
        acc_train,
        acc_val,
        title="150 datapoints, one having value NaN (loss train at x=5)")

    print("")
    print("------------------")
    print("1000 datapoints training")
    print("100 datapoints validation")
    print("------------------")
    nb_points_train = 1000
    nb_points_val = 100
    (loss_train, loss_val, acc_train, acc_val) = create_values(nb_points_train)

    # Set 9 out of 10 values of the validation arrays to -1.0, which will be
    # interpreted as None in show_chart(). (NumPy arrays don't support None
    # directly, only NaN, and NaN is already used above to check whether the
    # plotter correctly creates a warning for NaN data points.)
    all_indices = np.arange(0, nb_points_train, 1)
    keep_indices = np.arange(0, nb_points_train,
                             int(nb_points_train / nb_points_val))
    set_to_none_indices = np.delete(all_indices, keep_indices)
    loss_val[set_to_none_indices] = -1.0
    acc_val[set_to_none_indices] = -1.0

    show_chart(
        loss_train,
        loss_val,
        acc_train,
        acc_val,
        title="1000 training datapoints, but only 100 validation datapoints")

    print("")
    print("------------------")
    print("5 datapoints")
    print("slowly added, one by one")
    print("------------------")
    (loss_train, loss_val, acc_train, acc_val) = create_values(5)
    lap = LossAccPlotter(title="5 datapoints, slowly added one by one")

    for idx in range(loss_train.shape[0]):
        lap.add_values(idx,
                       loss_train=loss_train[idx],
                       loss_val=loss_val[idx],
                       acc_train=acc_train[idx],
                       acc_val=acc_val[idx],
                       redraw=True)
        sleep(1.0)

    print("Close the chart to continue.")
    lap.block()
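
main() relies on a helper create_values(n) whose definition is not shown in this excerpt. A minimal, hypothetical stand-in that satisfies the interface (four equal-length NumPy arrays; the exact curve shapes are invented):

import numpy as np

def create_values(n):
    """Hypothetical stand-in for the undefined helper used by main():
    returns (loss_train, loss_val, acc_train, acc_val) as arrays of
    n noisy, vaguely realistic example values.
    """
    x = np.arange(n)
    # decaying loss curve plus uniform noise
    loss_train = np.exp(-3.0 * x / max(n, 1)) + np.random.uniform(-0.05, 0.05, n)
    loss_val = loss_train + 0.1 + np.random.uniform(-0.05, 0.05, n)
    # accuracy mirrors the loss, clipped into [0, 1]
    acc_train = np.clip(1.0 - loss_train, 0.0, 1.0)
    acc_val = np.clip(acc_train - 0.05 + np.random.uniform(-0.05, 0.05, n), 0.0, 1.0)
    return loss_train, loss_val, acc_train, acc_val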
Example #18
def show_chart(loss_train, loss_val, acc_train, acc_val, lap=None, title=None):
    """Shows a plot using the LossAccPlotter and all provided values.

    Args:
        loss_train: y-values of the loss function of the training dataset.
        loss_val: y-values of the loss function of the validation dataset.
        acc_train: y-values of the accuracy of the training dataset.
        acc_val: y-values of the accuracy of the validation dataset.
        lap: A LossAccPlotter instance or None. If None, a new LossAccPlotter
            will be instantiated. (Default is None.)
        title: The title to use for the plot, i.e. LossAccPlotter.title.
    """
    lap = LossAccPlotter() if lap is None else lap

    # set the plot title, which will be shown at the very top of the plot
    if title is not None:
        lap.title = title

    # add loss train line/values
    for idx in range(loss_train.shape[0]):
        lt_val = loss_train[idx] if loss_train[idx] != -1.0 else None
        lap.add_values(idx, loss_train=lt_val, redraw=False)

    # add loss validation line/values
    for idx in range(loss_val.shape[0]):
        lv_val = loss_val[idx] if loss_val[idx] != -1.0 else None
        lap.add_values(idx, loss_val=lv_val, redraw=False)

    # add accuracy training line/values
    for idx in range(acc_train.shape[0]):
        at_val = acc_train[idx] if acc_train[idx] != -1.0 else None
        lap.add_values(idx, acc_train=at_val, redraw=False)

    # add accuracy validation line/values
    for idx in range(acc_val.shape[0]):
        av_val = acc_val[idx] if acc_val[idx] != -1.0 else None
        lap.add_values(idx, acc_val=av_val, redraw=False)

    # redraw once after adding all values, because that's significantly
    # faster than redrawing many times
    lap.redraw()

    # Block at the end so that the plot does not close immediately.
    print("Close the chart to continue.")
    lap.block()
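
A quick usage sketch of the -1.0 sentinel convention handled in the loops above (the arrays here are invented for illustration). show_chart() turns each -1.0 into None, so LossAccPlotter simply skips that x-index for the affected line:

import numpy as np

# Every entry except indices 0 and 5 is the -1.0 sentinel, meaning
# "no validation measurement at this x-index".
loss_val = np.full(10, -1.0)
loss_val[0], loss_val[5] = 0.9, 0.4
loss_train = np.linspace(1.0, 0.3, 10)

show_chart(loss_train, loss_val, np.array([]), np.array([]),
           title="sparse validation values via -1.0 sentinels")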
Example #20
def plot_image(title, filename, training, validation):
    plotter = LossAccPlotter(title,
                             save_to_filepath=os.path.join(
                                 input_dir, filename),
                             show_regressions=False,
                             show_averages=False,
                             show_loss_plot=True,
                             show_acc_plot=True,
                             show_plot_window=False,
                             x_label="Iteration")
    # Index of the next validation row to plot.
    next_validation_row_index_to_plot = 0

    for row in range(len(training)):
        training_record = training[row]
        training_iter = training_record[1]
        loss_train = training_record[2]

        # Plot both the training and the validation record when the
        # validation log contains an entry for this training iteration.
        if len(validation) > next_validation_row_index_to_plot and \
                validation[next_validation_row_index_to_plot][1] == training_iter:
            val_record = validation[next_validation_row_index_to_plot]
            next_validation_row_index_to_plot += 1
            loss_val = val_record[2]
            if len(val_record) == 3:
                plotter.add_values(training_iter,
                                   loss_train=loss_train,
                                   loss_val=loss_val,
                                   redraw=False)
            elif len(val_record) >= 5:  # record also carries an accuracy at index 4
                inst_acc_val = val_record[4]
                inst_acc_train = training_record[4]
                plotter.add_values(training_iter,
                                   loss_train=loss_train,
                                   loss_val=loss_val,
                                   acc_train=inst_acc_train,
                                   acc_val=inst_acc_val,
                                   redraw=False)
        else:  # Plot the training record only
            if len(training_record) == 3:
                plotter.add_values(training_iter,
                                   loss_train=loss_train,
                                   redraw=False)
            elif len(training_record) >= 5:  # record also carries an accuracy at index 4
                inst_acc_train = training_record[4]
                plotter.add_values(training_iter,
                                   loss_train=loss_train,
                                   acc_train=inst_acc_train,
                                   redraw=False)

    plotter.redraw()
    plotter.block()
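
plot_image() never spells out its record layout; from the indexing above one can infer that index 1 holds the iteration, index 2 the loss, and index 4 an optional accuracy, and the function also reads a module-level input_dir. A hypothetical call under those assumptions:

import os

input_dir = "plots"  # assumed module-level global read by plot_image()
os.makedirs(input_dir, exist_ok=True)

# Inferred record layout: (tag, iteration, loss, <unused>, accuracy).
training = [
    ("train", 100, 1.20, None, 0.55),
    ("train", 200, 0.90, None, 0.63),
    ("train", 300, 0.70, None, 0.71),
]
validation = [
    ("val", 200, 1.00, None, 0.60),  # only iteration 200 was validated
]

plot_image("Demo run", "demo.png", training, validation)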