Example #1
def obfuscate(args):
    # How the market is affected by increasing obfuscation levels
    create_consumers_fn, create_market_fn = setup(args)
    ray.init()

    @ray.remote(num_cpus=1)
    def remote_simulate(consumer_fn, market_fn):
        return run_simulation(consumer_fn, market_fn, use_tqdm=False)

    obfuscation_mus = [i * 0.2 for i in range(40)]
    results = ray.get([
        remote_simulate.remote(
            # bind the current mu as a lambda default so each task gets its
            # own value rather than the loop's final one
            create_consumers_fn, lambda mu=obfuscation_mu: create_market_fn(
                obfuscation_dist=create_distribution({
                    'type': 'normal',
                    'mu': mu,
                    'sigma': 0.5,
                    'lower_bound': 0.0
                }))) for obfuscation_mu in obfuscation_mus
    ])
    for result_key in [
            'firm_profits', 'consumer_search_costs', 'consumer_search_count',
            'consumer_prices'
    ]:
        result = [mean(r[result_key]) for r in results]
        plot_result(x=obfuscation_mus,
                    y=result,
                    x_label='obfuscation_level',
                    y_label=result_key)
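
`create_distribution` is project code not shown in this excerpt; the following is a hypothetical sketch of a factory consistent with the spec dict used above (treating `lower_bound` as a clipping bound is an assumption):

import numpy as np

def create_distribution(spec):
    # Hypothetical factory: returns a sampler for a normal distribution
    # whose draws are clipped at spec['lower_bound']
    def sample(size=None):
        draws = np.random.normal(spec['mu'], spec['sigma'], size=size)
        return np.clip(draws, spec.get('lower_bound', -np.inf), np.inf)
    return sample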
Example #2
def test(model, opt):
    criterion = nn.CrossEntropyLoss()

    test_dataset = get_dataset(opt.data_name, opt.data_root, opt.image_size, train=False)
    test_loader = DataLoader(test_dataset, batch_size=1, shuffle=False)

    model.eval()
    for i, (images, labels) in enumerate(test_loader):
        images = images.to(opt.device)
        labels = labels.to(opt.device)
        images.requires_grad = True

        model.zero_grad()
        outputs = model(images)
        result = F.softmax(outputs, dim=1)
        loss = criterion(outputs, labels)
        loss.backward()

        images_grad = images.grad.data
        perturbed_images = fast_gradient(images, opt.epsilon, images_grad)
        perturbed_outputs = model(perturbed_images)
        attack_result = F.softmax(perturbed_outputs, dim=1)

        images = images.permute(0, 2, 3, 1).detach().cpu().numpy()
        result = result[0].detach().cpu().numpy()

        perturbed_images = perturbed_images.permute(0, 2, 3, 1).detach().cpu().numpy()
        attack_result = attack_result[0].cpu().detach().numpy()

        print(images.shape)
        print(result.shape)
        plot_result("./outputs/fgsm/{}.png".format(i), images, result, perturbed_images, attack_result)
Example #3
def run(num_chromosomes, generations):
    cities, city_info, city_infoX, city_infoY = read_all_cities("TW319_368Addresses-no-far-islands.json")
    city_ids = list(range(len(cities)))
    random.seed()

    tsp_path = os.path.dirname(os.path.abspath(__file__))
    ocl_kernels = os.path.realpath(os.path.join(tsp_path, "..", "..", "kernel"))
    tsp_kernels = os.path.join(tsp_path, "kernel")

    sample = ShufflerChromosome([SimpleGene(v, city_ids) for v in city_ids])
    f = open(os.path.join(tsp_kernels, "taiwan_fitness.c"), "r")
    fstr = "".join(f.readlines())
    f.close()

    fstr = "#define TAIWAN_POINT_X {" + ", ".join([str(v) for v in city_infoX]) + "}\n" +\
           "#define TAIWAN_POINT_Y {" + ", ".join([str(v) for v in city_infoY]) + "}\n" +\
           fstr

    sample.use_improving_only_mutation("improving_only_mutation_helper")
    tsp_ga_cl = OpenCLGA(sample, generations, num_chromosomes, fstr, "taiwan_fitness", None,
                         [ocl_kernels])

    prob_mutate = 0.10
    prob_cross = 0.80
    tsp_ga_cl.run(prob_mutate, prob_cross)

    print("run took", tsp_ga_cl.elapsed_time, "seconds")
    best = tsp_ga_cl.best
    print("Shortest Path: " + " => ".join(cities[g]["name"] for g in best))

    utils.plot_result(city_info, best)
Example #4
def main(args):
    env = gym.make('CartPole-v0')
    dim_state = env.observation_space.shape[0]
    dim_action = env.action_space.n

    actor = Actor(dim_state, args.dim_hidden, dim_action)
    critic = Critic(dim_state, args.dim_hidden)
    agent = ActorCriticAgent(env=env,
                             actor=actor,
                             critic=critic,
                             lr=args.lr,
                             gamma=args.gamma,
                             render=args.render)

    scores = 0
    history = []
    for i in range(args.n_episodes):
        scores += agent.run_episode()
        if (i + 1) % args.print_interval == 0:
            print(
                f"[Episode {i+1}] Avg Score: {scores / args.print_interval:.3f}"
            )
            history.append(scores / args.print_interval)
            scores = 0.0

    plot_result(history, args.print_interval)
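
`plot_result` here is project-specific and not shown; a plausible matplotlib sketch consistent with the call above (the axis labels are assumptions):

import matplotlib.pyplot as plt

def plot_result(history, print_interval):
    # x-axis: the episode index at the end of each averaging interval
    episodes = [(i + 1) * print_interval for i in range(len(history))]
    plt.plot(episodes, history)
    plt.xlabel('Episode')
    plt.ylabel('Avg score over {} episodes'.format(print_interval))
    plt.show()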
Example #5
def simulate_lac_operon():

    P_NAMES = ("i", "rI", "I", "Lac", "o", "RNAP", "A", "Y", "Z", "r", "ILac",
               "Io", "RNAPo")

    M, c, S, k = generate_lac_operon_instance()
    P_init = M
    k_guess = k

    T_max = 1000
    step_size = 4

    # simulate using Deterministic simulation (DSM)
    P_dsm, T_dsm = deterministic_simulation(lac_operon_odefun, P_init, T_max,
                                            step_size, k_guess)
    plot_result(T_dsm, P_dsm, title="Deterministic lac operon", legend=P_NAMES)

    # simulate using Gillespie
    simulate_many(S, M, lac_operon_hazards, c, T_max, P_NAMES, "Gillespie",
                  gillespieSSA, "Lac operon")

    # Linspace size
    Nt = 4000

    # simulate using the Poisson approximation method
    simulate_many(S, M, lac_operon_hazards, c, T_max, P_NAMES, "Poisson",
                  poisson_approx, "Lac operon", Nt)

    # simulate using the CLE method
    simulate_many(S, M, lac_operon_hazards, c, T_max, P_NAMES, "CLE", CLE,
                  "Lac operon", Nt)
Example #6
    def train(self, inputs, targets, lr=1, batch_size=30, epochs=100, plot=False, kernel='linear'):

        self.batch_size = batch_size
        # init the kernel
        self.set_kernel(kernel)

        # set optimization method (Gradient Descent)
        self.optimization = tf.train.GradientDescentOptimizer(lr)
        self.training_step = self.optimization.minimize(self.loss)
        self.init = tf.global_variables_initializer()
        self.session.run(self.init)

        # set training data
        train_inputs, train_target = inputs, targets

        # performance tracking
        train_loss_result, train_accuracy_result = [], []

        # for each epoch
        for i in range(epochs):

            # generate random indexes for each batch
            batch_index = np.random.choice(len(train_inputs), size=batch_size)
            self.session.run(self.training_step, feed_dict={self.inputs: train_inputs[batch_index],
                                                            self.target: train_target[:, batch_index]})
            # if plotting, record every epoch
            if plot:
                # record accuracy
                train_accuracy, train_loss = self.generate_step_tracking_data(
                    train_inputs[batch_index], train_target[:, batch_index])
                train_accuracy_result.append(train_accuracy)
                train_loss_result.append(train_loss)

            if (i + 1) % (epochs // 5) == 0:
                # if not plotting, get intermittent accuracy and loss
                if not plot:
                    # record accuracy
                    train_accuracy, train_loss = self.generate_step_tracking_data(
                        train_inputs[batch_index], train_target[:, batch_index])
                utl.print_progress(i, epochs, train_loss, train_accuracy)

        # plot results
        if plot:
            if self.features != 2:
                print('Plotting only supported for 2 feature data sets... skipping output')
            else:
                utl.plot_loss(train_loss_result)
                utl.plot_accuracy(train_accuracy_result)
                grid = utl.generate_grid(train_inputs)
                grid_predictions = self.session.run(self.prediction, feed_dict={self.inputs: train_inputs[batch_index],
                                                                                self.target: train_target[:, batch_index],
                                                                                self.grid: grid})
                # plot the result grid
                utl.plot_result(grid_predictions, inputs, targets)

        # commit data points for the last support vectors used
        self.support_vector_data = [train_inputs[batch_index], train_target[:, batch_index]]
Example #7
def do_sentiment(task_title, task_config_filename, do_print_code=False):
    st.title(task_title)

    config_names, config_map = task_configuration('assets/%s.json' %
                                                  task_config_filename)
    example = st.selectbox('Choose an example', config_names)
    # st.markdown(config_map[example][2], unsafe_allow_html=True)

    height = min((len(config_map[example][0].split()) + 1) * 2, 200)
    if config_map[example][4] == 'rtl':
        local_css('assets/rtl.css')

    sequence = st.text_area('Text',
                            config_map[example][0],
                            key='sequence',
                            height=height)
    labels = st.text_input('Labels (comma-separated)',
                           config_map[example][1],
                           max_chars=1000)
    original_labels = config_map[example][1].split(', ')

    labels = list(
        set([
            x.strip() for x in labels.strip().split(',') if len(x.strip()) > 0
        ]))

    if len(labels) == 0 or len(sequence) == 0:
        st.write('Enter some text and at least one label to see predictions.')
        return

    if not is_identical(labels, original_labels, 'list'):
        st.write('Your labels must be the same as those of the NLP task `%s`' %
                 task_title)
        return

    if st.button('Analyze'):
        if do_print_code:
            load_snippet('snippets/sentiment_analysis_code.txt', 'python')

        s = st.info('Predicting ...')
        model_config = load_config(config_map[example][3])
        labels = model_config.id2label

        tokenizer = load_tokenizer(config_map[example][3])
        model = load_model(config_map[example][3],
                           'TFAlbertForSequenceClassification')
        scores, prediction = sequence_predicting(model, tokenizer, sequence,
                                                 labels)
        time.sleep(1)
        s.empty()

        plot_result(list(labels.values()), scores, prediction)
Example #8
def main(_):
    with tf.Session() as sess:
        # print("\nParamters used: ", args, "\n")
        logger.info("\nParamters used: {}\n".format(args))

        args.model_name = "mlp"
        baseline_model = MbPA(sess, args)
        args.model_name = "mbpa"
        mbpa_model = MbPA(sess, args)

        args.model_name = "mbpa_test"
        mbpa_test_model = MbPA_KNN_Test(sess, args)
        mnist = input_data.read_data_sets("mnist/", one_hot=True)

        task_permutation = []
        for task in range(args.num_tasks_to_run):
            task_permutation.append(np.random.permutation(784))

        # print("\nBaseline MLP training...\n")
        logger.info("\nBaseline MLP training...\n")
        start = time.time()
        performance_baseline = training(baseline_model, mnist,
                                        task_permutation, False)
        # performance_baseline = training_knn(baseline_model, mnist, task_permutation, False)
        end = time.time()
        time_needed_baseline = round(end - start)
        logger.info("Training time elapased: {}s".format(time_needed_baseline))

        # print("\nMemory-based parameter Adaptation....\n")
        logger.info("\nMemory-based parameter Adaptation....\n")
        start = time.time()
        mbpa_performance = training(mbpa_model, mnist, task_permutation, True)
        # mbpa_performance = training_knn(mbpa_model, mnist, task_permutation, True)
        end = time.time()
        time_needed_baseline = round(end - start)
        # print("Training time elapased: ", time_needed_baseline, "s")
        logger.info("Training time elapased: {}s".format(time_needed_baseline))

        # print("\nMemory-based parameter Adaptation....\n")
        logger.info("\nMemory-based test parameter Adaptation....\n")
        start = time.time()
        # mbpa_performance = training(mbpa_model, mnist, task_permutation, True)
        mbpa_test_performance = training_knn(mbpa_test_model, mnist,
                                             task_permutation, True)
        end = time.time()
        time_needed_baseline = round(end - start)
        print("Training time elapased: ", time_needed_baseline, "s")
        logger.info("Training time elapased: {}s".format(time_needed_baseline))
        plot_result(args.num_tasks_to_run, performance_baseline,
                    mbpa_performance, mbpa_test_performance,
                    (args.log.split("/")[-1]).split(".")[0])
Example #9
File: simple_tsp.py Project: arita37/oclGA
def run(num_chromosomes, generations):
    num_cities = 20
    random.seed(119)
    city_ids = list(range(0, num_cities))
    city_info = {
        city_id: (random.random() * 100, random.random() * 100)
        for city_id in city_ids
    }

    sample = ShufflerChromosome([SimpleGene(v, city_ids) for v in city_ids])

    tsp_path = os.path.dirname(os.path.abspath(__file__))
    ocl_kernels = os.path.realpath(os.path.join(tsp_path, "..", "..",
                                                "kernel"))
    tsp_kernels = os.path.join(tsp_path, "kernel")

    f = open(os.path.join(tsp_kernels, "simple_tsp.c"), "r")
    fstr = "".join(f.readlines())
    f.close()

    pointX = [str(city_info[v][0]) for v in city_info]
    pointY = [str(city_info[v][1]) for v in city_info]

    tsp_ga_cl = OpenCLGA(sample, generations, num_chromosomes, fstr,
                         "simple_tsp_fitness", [{
                             "t": "float",
                             "v": pointX,
                             "n": "x"
                         }, {
                             "t": "float",
                             "v": pointY,
                             "n": "y"
                         }], [ocl_kernels])

    prob_mutate = 0.1
    prob_cross = 0.8
    tsp_ga_cl.run(prob_mutate, prob_cross)

    print("run took", tsp_ga_cl.elapsed_time, "seconds")
    best = tsp_ga_cl.best
    print("Shortest Path: " + " => ".join(str(g) for g in best))

    utils.plot_result(city_info, best)
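
The fitness itself runs inside the OpenCL kernel, so a pure-Python sanity check of the reported best route can be handy; this sketch assumes `city_info` maps a city id to an `(x, y)` pair, as built above, and treats the tour as a closed loop:

import math

def route_length(city_info, route):
    # sum the Euclidean distances between consecutive cities, closing the loop
    total = 0.0
    for a, b in zip(route, list(route[1:]) + list(route[:1])):
        (x1, y1), (x2, y2) = city_info[a], city_info[b]
        total += math.hypot(x2 - x1, y2 - y1)
    return total

# e.g. print("Best route length:", route_length(city_info, list(best)))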
Example #10
File: main.py Project: ktro2828/ResNet50
def main():

    seed = 1234
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    client.notify("==> Loading the dataset...")
    dataset = load_cifar10(batch=args.batch)
    train_dl = dataset['train']
    test_dl = dataset['test']

    client.notify("==> Loading the model...")
    net = Resnet50(output_dim=10).to(device)
    if args.weight_file is not None:
        weights = torch.load(args.weight_file)
        net.load_state_dict(weights, strict=False)

    if not os.path.exists('./models'):
        os.makedirs('./models')

    optimizer = optimizers.Adam(net.parameters(), lr=1e-4)
    lr_scheduler = optimizers.lr_scheduler.StepLR(optimizer, 5, 0.1)

    history = {
        'epochs': np.arange(1, args.epochs+1),
        'train_loss': [],
        'train_acc': [],
        'test_loss': [],
        'test_acc': []
    }

    client.notify('==> Start training...')
    for epoch in range(args.epochs):
        train(net, optimizer, train_dl, epoch, history)
        lr_scheduler.step()
        test(net, test_dl, epoch, history)

    client.notify("==> Training Done")

    plot_result(history)
    client.notify('==> Saved plot')
Example #11
def main(args):
    env = gym.make('CartPole-v0')
    policy = Policy(dim_hidden=args.dim_hidden)
    agent = REINFORCEAgent(env=env,
                           policy=policy,
                           lr=args.lr,
                           gamma=args.gamma,
                           render=args.render)

    scores = 0
    history = []
    for i in range(args.n_episodes):
        scores += agent.run_episode()
        if (i + 1) % args.print_interval == 0:
            print(
                f"[Episode {i + 1}] Avg Score: {scores / args.print_interval:.3f}"
            )
            history.append(scores / args.print_interval)
            scores = 0.0

    plot_result(history, args.print_interval)
Example #12
File: train.py Project: o84577/ISGAN
def get_performance2():
    ssim_encoder = [[], []]
    ssim_decoder = [[], []]
    mse_encoder = [[], []]
    mse_decoder = [[], []]
    fig1, ax1 = plt.subplots()
    fig2, ax2 = plt.subplots()
    fig3, ax3 = plt.subplots()
    fig4, ax4 = plt.subplots()

    for epoch in range(1, 51):
        encoder.load_state_dict(
            torch.load('./checkpoint/encoder_{:d}.pkl'.format(epoch)))
        decoder.load_state_dict(
            torch.load('./checkpoint/decoder_{:d}.pkl'.format(epoch)))
        encoder.eval()
        decoder.eval()
        mse1, mse2, ssim1, ssim2, num = list(), list(), list(), list(), 0
        for idx, (secret, cover) in tqdm(enumerate(train_data_loader)):
            num += 1
            if params.use_cuda:
                secret = secret.cuda()
                cover = cover.cuda()
            secret = Variable(secret)
            cover = Variable(cover)
            stego = encoder(secret, cover)
            secret2 = decoder(stego)
            mse1.append(MSE_loss(stego, cover).data[0])
            mse2.append(MSE_loss(secret2, secret).data[0])
            ssim1.append(SSIM_loss(stego, cover).data[0])
            ssim2.append(SSIM_loss(secret2, secret).data[0])
        tmp1, tmp2, tmp3, tmp4 = sum(ssim1) / num, sum(ssim2) / num, sum(
            mse1) / num, sum(mse2) / num
        print(
            'Training Epoch {:d}/50, ssim_encoder: {:5f}, ssim_decoder: {:5f}, mse_encoder: {:5f}, mse_decoder: {:5f}'
            .format(epoch, tmp1, tmp2, tmp3, tmp4))
        ssim_encoder[0].append(tmp1)
        ssim_decoder[0].append(tmp2)
        mse_encoder[0].append(tmp3)
        mse_decoder[0].append(tmp4)
        ax1.plot(ssim_encoder[0], 'r')
        ax1.plot(ssim_decoder[0], 'b')
        fig1.savefig('{:s}_ssim.jpg'.format('train'))
        ax2.plot(mse_encoder[0], 'r')
        ax2.plot(mse_decoder[0], 'b')
        fig2.savefig('{:s}_mse.jpg'.format('train'))

    for epoch in range(1, 51):
        encoder.load_state_dict(
            torch.load('./checkpoint/encoder_{:d}.pkl'.format(epoch)))
        decoder.load_state_dict(
            torch.load('./checkpoint/decoder_{:d}.pkl'.format(epoch)))
        encoder.eval()
        decoder.eval()
        mse1, mse2, ssim1, ssim2, num = list(), list(), list(), list(), 0
        for idx, (secret, cover) in tqdm(enumerate(test_data_loader)):
            num += 1
            if params.use_cuda:
                secret = secret.cuda()
                cover = cover.cuda()
            secret = Variable(secret)
            cover = Variable(cover)
            stego = encoder(secret, cover)
            secret2 = decoder(stego)
            mse1.append(MSE_loss(stego, cover).data[0])
            mse2.append(MSE_loss(secret2, secret).data[0])
            ssim1.append(SSIM_loss(stego, cover).data[0])
            ssim2.append(SSIM_loss(secret2, secret).data[0])

        tmp1, tmp2, tmp3, tmp4 = sum(ssim1) / num, sum(ssim2) / num, sum(
            mse1) / num, sum(mse2) / num
        print(
            'Validating Epoch {:d}/50, ssim_encoder: {:5f}, ssim_decoder: {:5f}, mse_encoder: {:5f}, mse_decoder: {:5f}'
            .format(epoch, tmp1, tmp2, tmp3, tmp4))
        ssim_encoder[1].append(tmp1)
        ssim_decoder[1].append(tmp2)
        mse_encoder[1].append(tmp3)
        mse_decoder[1].append(tmp4)
        plot_result('ssim', ssim_encoder + ssim_decoder)
        plot_result('mse', mse_encoder + mse_decoder)
        ax3.plot(ssim_encoder[1], 'r')
        ax3.plot(ssim_decoder[1], 'b')
        fig3.savefig('{:s}_ssim.jpg'.format('val'))
        ax4.plot(mse_encoder[1], 'r')
        ax4.plot(mse_decoder[1], 'b')
        fig4.savefig('{:s}_mse.jpg'.format('val'))
Example #13
def simulate_auto_regulation():

    P_NAMES = ("gene g", "Protein dimer $P_2$", "mRNA r", "Protein P",
               "Repression product  $gP_2$")
    P_NAMES_STOCH = ("Repression product $gP_2$", "gene g", "mRNA r",
                     "Protein P", "Protein dimer $P_2$")
    # simulate using Deterministic simulation (DSM)
    M, c, S, P_I, k = generate_auto_reg_instance()
    P_init = np.array([10, 0, 0, 0, 0])
    k_guess = np.array([1, 10, 0.01, 10, 1, 1, 0.1, 0.01])
    T_max = 250
    step_size = 0.01

    # Values in order:
    # g, P_2, r, P, gP_2
    P_dsm, T_dsm = deterministic_simulation(auto_regulatory_odefun, P_init,
                                            T_max, step_size, k_guess)
    plot_result(T_dsm,
                P_dsm,
                title="Deterministic Auto-regulation",
                legend=P_NAMES)
    #
    # For the stochastic simulations the values are in order:
    # gP_2, g, r, P, P_2
    #
    # simulate using Gillespie
    # T_g, X_g = gillespieSSA(S, M, auto_regulatory_hazards, c, t_max=T_max)
    # plot_result(T_g, X_g, title="Gillespie dimerisation",
    #             legend=P_NAMES_STOCH)
    # plot_result(T_g, X_g[:, 2], title="Gillespie dimerisation",
    #             legend=("RNA",))
    #
    # simulate using the Poisson approximation method
    # Nt = 1000
    # T_p, X_p = poisson_approx(
    #     S, M, auto_regulatory_hazards, c, np.linspace(1, T_max, Nt))
    # plot_result(T_p, X_p, title="Poisson auto-regulation",
    #             legend=P_NAMES_STOCH)
    # plot_result(T_p, X_p[:, 4], title="Poisson auto-regulation",
    #             legend=("P_2",))
    #
    # simulate using the CLE method
    # M, c, S = generate_auto_reg_instance()
    # Nt = 1000  # choosing delta_t such that propensity * delta_t >> 1.
    # T_p, X_p = CLE(S, M, auto_regulatory_hazards, c, np.linspace(1, T_max, Nt))
    # plot_result(T_p, X_p, title="CLE auto-regulation", legend=P_NAMES_STOCH)

    # simulate using Gillespie
    simulate_many(S, M, auto_regulatory_hazards, c, T_max, P_NAMES_STOCH,
                  "Gillespie", gillespieSSA, "Auto-regulation")

    # Linspace size
    Nt = 4000

    # simulate using the Poisson approximation method
    simulate_many(S, M, auto_regulatory_hazards, c, T_max, P_NAMES_STOCH,
                  "Poisson", poisson_approx, "Auto-regulation", Nt)

    # simulate using the CLE method
    simulate_many(S, M, auto_regulatory_hazards, c, T_max, P_NAMES_STOCH,
                  "CLE", CLE, "Auto-regulation", Nt)
Example #14
        # Step 3. Run our forward pass. We combine this step with the
        # get_loss function below.
        # tag_scores = model(sentence_in)

        # Step 4. Compute the loss, gradients, and update the parameters by
        # calling optimizer.step(). First check whether the 'lower' option
        # is enabled.
        if opts.lower == 1:
            # We first convert it to one-hot, then input
            input_caps = torch.LongTensor(train_data[index]['caps'])
            loss = model.get_loss(targets,
                                  input_words=input_words,
                                  input_caps=input_caps)
        else:
            loss = model.get_loss(targets, input_words=input_words)

        epoch_costs.append(loss.data.numpy())
        loss.backward()
        nn.utils.clip_grad_norm(model.parameters(), opts.clip)
        optimizer.step()

    print("Epoch %i, cost average: %f" % (epoch, np.mean(epoch_costs)))

# Final Evaluation after training
eval_result = evaluate(model, dev_data, dictionaries, opts.lower)
accuracys.append(eval_result['accuracy'])
precisions.append(eval_result['precision'])
recalls.append(eval_result['recall'])
FB1s.append(eval_result['FB1'])

print("Plot final result")
plot_result(accuracys, precisions, recalls, FB1s)
Example #15
    assert os.path.isfile(
        json_path), "No json configuration file found at {}".format(json_path)
    params = utils.Params(json_path)

    params.cuda = torch.cuda.is_available()
    seed_everything(params.seed)
    utils.set_logger(
        os.path.join(args.model_dir, "log/" + args.mode + VERSION + ".log"))

    model = ResNet50((224, 224), 5)
    loss_fn = torch.nn.CrossEntropyLoss()
    optimizer = Adam(model.parameters(), lr=params.lr, eps=1e-4, amsgrad=True)
    dataloaders = fetch_dataloader(args.data_dir, [0.8, 0.1, 0.1], params)

    if args.restore_file:
        model.load_state_dict(torch.load(args.restore_file))
    if torch.cuda.is_available():
        model = model.cuda()
    if args.mode == 'train':
        train_losses, train_accs, val_losses, val_accs = train_and_eval(
            model, loss_fn, dataloaders['train'], dataloaders['val'],
            optimizer, params.epoch, accuracy,
            os.path.join(args.model_dir, "model"))
        plot_result(
            train_losses, val_losses, "loss",
            os.path.join(args.model_dir, "log/loss" + VERSION + ".png"))
        plot_result(
            train_accs, val_accs, "metric",
            os.path.join(args.model_dir, "log/metric" + VERSION + ".png"))
    elif args.restore_file:
        evaluate(model, loss_fn, dataloaders['test'], accuracy)
    print("Epoch %i, cost average: %f" % (epoch, np.mean(epoch_costs)))
    # Save model and dictionaries

# Final Evaluation after training
eval_result = evaluate(model, dev_data, dictionaries)
accuracys.append(eval_result['accuracy'])
precisions.append(eval_result['precision'])
recalls.append(eval_result['recall'])
FB1s.append(eval_result['FB1'])

# Save model and dictionaries
save_model_dictionaries('model', model, dictionaries, opts)

# Plot Result
print("Plot final result")
plot_result(accuracys, precisions, recalls, FB1s)
"""
def process(path,model,dictionaries):
    fns=[os.path.join(fn) for root,dirs,files in os.walk(path) for fn in files]
    for afile in fns:
        sentences = load_dataset(Parse_parameters,path+"/"+afile,dictionaries)
        with codecs.open("tmp/"+path+"/"+afile,'w','utf8') as f:
            for index in xrange(len(sentences)):
                #input sentence
                input_words = autograd.Variable(torch.LongTensor(sentences[index]['words']))

                #calculate the tag score
                tags,sen_score= model.get_tags(input_words = input_words)

                # get predict tags
                predict_tags = [dictionaries['id_to_tag'][tag] if (tag in dictionaries['id_to_tag']) else 'START_STOP' for tag in tags]
Example #17
# create validation data - here we'll just use a 1-d grid
X_val = np.atleast_2d(np.linspace(-3, 3, 100)).T
y_val = np.expand_dims(X_val[:, 0], 1)  # just dummy data

params = {
    "init_stddev_1_w": np.sqrt(10),
    "init_stddev_1_b": np.sqrt(10),  # set these equal
    "init_stddev_2_w": 1.0 / np.sqrt(100)  # normal scaling
}

trainer = Trainer(X_train=X_train,
                  X_val=X_val,
                  y_train=y_train,
                  base=NN,
                  n_ensembles=5,
                  data_noise=0.001,
                  params=params)

plot_priors(X_val, trainer.y_prior, n_ensembles=5)

trainer.train()

trainer.predict()

plot_pred(X_train, X_val, y_train, trainer.y_pred, n_ensembles=5)

y_pred_mu, y_pred_std = trainer.ensemble()

plot_result(X_train, X_val, y_train, trainer.y_pred, y_pred_mu, y_pred_std)
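
`trainer.ensemble()` is not shown in this excerpt; a sketch of the usual mean/variance aggregation for an ensemble, assuming the member predictions are stacked along the first axis and the `data_noise` passed to `Trainer` is added back into the predictive variance:

import numpy as np

def combine_ensemble(y_pred, data_noise):
    # y_pred: (n_ensembles, n_points) array of member predictions
    y_mu = np.mean(y_pred, axis=0)
    # epistemic spread across members plus the assumed aleatoric data noise
    y_var = np.var(y_pred, axis=0) + data_noise
    return y_mu, np.sqrt(y_var)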
Example #18
    print("The Train loss is {0:8.5f}".format(loss_show))
    print("-" * 20)
    Debug_flag = 0
    '''
        After each epoch, use the test set to compute the final accuracy
    '''
    model.eval()
    with torch.no_grad():
        for j, test_data in enumerate(testloader):
            # if Debug_flag == 2:
            #     break
            test_img = test_data[0].to(device)
            label = test_data[1].to(device)

            _, pred = model(test_img, apply_softmax=True).max(dim=1)
            # print("The prediction is ")
            # print(pred)
            # print("The label is ")
            # print(label)
            acc = get_accuracy(pred, label)
            # fold the new batch accuracy into a running average
            total_acc = (acc + total_acc) / 2 if total_acc != 0.0 else acc

            # Debug_flag += 1
    print("acc is {0:4.1f}%".format(total_acc))
    best_acc.append(total_acc)
    print("{0:2d} epochs ends".format(i))

plot_result(best_acc)
torch.save(model.state_dict(), "model.pt")
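
Note that the `total_acc` update above is an exponential-style average that weights later batches more heavily; if an equally weighted mean is intended, a sketch of the usual accumulation (`predictions_and_labels` is a hypothetical iterable standing in for the test loop):

# accumulate per-batch accuracies and average once after the loop
batch_accs = []
for pred, label in predictions_and_labels:
    batch_accs.append(get_accuracy(pred, label))
total_acc = sum(batch_accs) / len(batch_accs)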
Example #19

for c in continuous:
    data = df[c]
    best_fit_name, best_fit_params = best_fit_distribution(data, 50)
    best_distributions.append((best_fit_name, best_fit_params))



# Result
best_distributions = [
    ('fisk', (11.744665309421649, -66.15529969956657, 94.73575225186589)),
    ('halfcauchy', (-5.537941926133496e-09, 17.86796415175786))]


plot_result(df, continuous, best_distributions)


def generate_like_df(df, categorical_cols, continuous_cols, best_distributions, n, seed=0):
    np.random.seed(seed)
    d = {}

    for c in categorical_cols:
        counts = df[c].value_counts()
        d[c] = np.random.choice(list(counts.index), p=(counts/len(df)).values, size=n)

    for c, bd in zip(continuous_cols, best_distributions):
        dist = getattr(scipy.stats, bd[0])
        d[c] = dist.rvs(*bd[1], size=n)

    return pd.DataFrame(d, columns=categorical_cols+continuous_cols)
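
A short usage sketch for `generate_like_df`; the `categorical` column list is an assumption, while `continuous` and `best_distributions` come from the code above:

# draw 1000 synthetic rows that mimic the original dataframe
synthetic = generate_like_df(df, categorical, continuous, best_distributions, n=1000)
print(synthetic.describe())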
Example #20
####################################################################
"""
PART III: Training and test performance
"""
best_loss = 1e5
# path for saving model
model_path = model_name + '.pth'

for epoch in range(1, num_epochs+1):
    train_loss_i = train(model_CNN, device, train_loader, criterion, optimizer, epoch)
    validation_loss_i = test(model_CNN, device, validation_loader, criterion, epoch)
    scheduler.step()
    print('Optimizer Learning rate: {0:.5f}'.format(optimizer.param_groups[0]['lr']))
    train_loss.append(train_loss_i)
    validation_loss.append(validation_loss_i)
    if best_loss > validation_loss_i:
        # if the current loss is lower than the best possible loss
        # save the model
        ts = datetime.datetime.now().strftime("%d_%b_%Y@%H:%M:%S")
        print(ts + ' >>Saving the model: Test loss at {:.4f}'.format(validation_loss_i))
        torch.save(model_CNN.state_dict(), model_path)
        best_loss = validation_loss_i
    if epoch == num_epochs:
        # Save training model to evaluate performance on training set
        torch.save(model_CNN.state_dict(), 'training_model.pth')
        # Last loop, evaluate on test set
        print('Training complete, evaluating on the test set...')
        test_loss = test(model_CNN, device, test_loader, criterion, epoch)
plot_result(train_loss, validation_loss, num_epochs)