Example #1
    def __init__(self, save_dir='checkpoints', log_dir='logs', gpu_ids=[0]):
        self.model_names = ['netG']
        self.save_dir = save_dir
        if not os.path.isdir(save_dir):
            os.makedirs(save_dir)
        self.gpu_ids = gpu_ids
        self.logger = Logger(log_dir)
        # Decide which device we want to run on
        self.device = torch.device("cuda:{}".format(self.gpu_ids[0]) if (
            torch.cuda.is_available() and len(self.gpu_ids) > 0) else "cpu")

        self.netG = UnetGenerator(input_nc=3,
                                  output_nc=3,
                                  num_downs=8,
                                  ngf=128,
                                  norm_layer=nn.BatchNorm2d,
                                  use_dropout=True).to(self.device)
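        # apply 'normal' weight initialization (scale 0.002) and keep the model on GPU 0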
        init_net(self.netG, 'normal', 0.002, [0])
Example #2
def test_model(test_trees, labels, embeddings, embedding_lookup, opt):

    logdir = opt.model_path
    batch_size = opt.train_batch_size
    epochs = opt.niter
    num_feats = len(embeddings[0])

    random.shuffle(test_trees)

    # build the inputs and outputs of the network
    nodes_node, children_node, hidden_node, pooling = network.init_net(
        num_feats, len(labels), opt.aggregation)

    out_node = network.out_layer(hidden_node)
    labels_node, loss_node = network.loss_layer(hidden_node, len(labels))

    optimizer = tf.train.AdamOptimizer(LEARN_RATE)
    train_step = optimizer.minimize(loss_node)
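    # note: the optimizer and train_step above are built but never executed in this test routine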

    sess = tf.Session()  # config=tf.ConfigProto(device_count={'GPU': 0})
    sess.run(tf.global_variables_initializer())
    with tf.name_scope('saver'):
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(logdir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Continue training with old model")
            saver.restore(sess, ckpt.model_checkpoint_path)
            for i, var in enumerate(saver._var_list):
                print('Var {}: {}'.format(i, var))

    checkfile = os.path.join(logdir, 'cnn_tree.ckpt')

    correct_labels = []
    predictions = []
    print('Computing test accuracy...')
    for batch in sampling.batch_samples(
            sampling.gen_samples(test_trees, labels, embeddings,
                                 embedding_lookup), 1):
        nodes, children, batch_labels = batch
        output = sess.run([out_node],
                          feed_dict={
                              nodes_node: nodes,
                              children_node: children,
                          })
        # print(output)
        correct_labels.append(np.argmax(batch_labels))
        predictions.append(np.argmax(output))

    target_names = list(labels)
    print('Accuracy:', accuracy_score(correct_labels, predictions))
    print(
        classification_report(correct_labels,
                              predictions,
                              target_names=target_names))
    print(confusion_matrix(correct_labels, predictions))
Example #3
def test_model(logdir, infile, embedfile):
    """Test a classifier to label ASTs"""

    with open(infile, 'rb') as fh:
        _, trees, labels = pickle.load(fh)

    with open(embedfile, 'rb') as fh:
        embeddings, embed_lookup = pickle.load(fh)
        num_feats = len(embeddings[0])

    # build the inputs and outputs of the network
    nodes_node, children_node, hidden_node = network.init_net(
        num_feats, len(labels))
    out_node = network.out_layer(hidden_node)

    ### init the graph
    sess = tf.Session()  # config=tf.ConfigProto(device_count={'GPU': 0})
    sess.run(tf.global_variables_initializer())

    with tf.name_scope('saver'):
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(logdir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        else:
            raise FileNotFoundError('Checkpoint not found.')

    correct_labels = []
    # make predictions from the input
    predictions = []
    step = 0
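    # evaluate one tree at a time (batch size 1)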
    for batch in sampling.batch_samples(
            sampling.gen_fast_samples(trees, labels, embeddings, embed_lookup),
            1):
        nodes, children, batch_labels = batch
        output = sess.run([out_node],
                          feed_dict={
                              nodes_node: nodes,
                              children_node: children,
                          })
        correct_labels.append(np.argmax(batch_labels))
        predictions.append(np.argmax(output))
        step += 1
        print(step, '/', len(trees))

    target_names = list(labels)
    print('Accuracy:', accuracy_score(correct_labels, predictions))
    print(
        classification_report(correct_labels,
                              predictions,
                              target_names=target_names))
    print(confusion_matrix(correct_labels, predictions))
Example #4
    def build_model(self):
        # Build model
        if self.config.model_type == "UNet":
            self.models[self.model_types[0]] = network.UNet(in_channels=self.config.num_img_ch,
                                                            out_channels=self.config.num_classes,
                                                            num_features=self.config.num_features)
        else:
            raise NotImplementedError("Model type [%s] is not implemented" % self.config.model_type)

        # Build optimizer
        self.optimizers[self.model_types[0]] = optim.Adam(self.models[self.model_types[0]].parameters(),
                                                          lr=self.config.lr_opt["init"],
                                                          betas=(0.9, 0.999),
                                                          weight_decay=self.config.l2_penalty)

        # Build criterion
        self.criteria["bce"] = nn.BCELoss  # Binary cross entropy
        self.criteria["l1"] = nn.L1Loss()  # absolute-value norm (L1 norm)

        # Model initialization
        for model_type in self.model_types:
            self.models[model_type] = network.init_net(self.models[model_type],
                                                       init_type="kaiming", init_gain=0.02,
                                                       device_ids=self.config.device_ids)
Example #5
def learn_vectors(samples,
                  logdir,
                  outfile,
                  num_feats=NUM_FEATURES,
                  epochs=EPOCHS):
    """Learn a vector representation of Python AST nodes."""

    # build the inputs and outputs of the network
    input_node, label_node, embed_node, loss_node = network.init_net(
        num_feats=num_feats, batch_size=BATCH_SIZE)

    # use gradient descent with momentum to minimize the training objective
    train_step = tf.train.GradientDescentOptimizer(LEARN_RATE). \
                    minimize(loss_node)

    tf.summary.scalar('loss', loss_node)

    ### init the graph
    sess = tf.Session()

    with tf.name_scope('saver'):
        saver = tf.train.Saver()
        summaries = tf.summary.merge_all()
        writer = tf.summary.FileWriter(logdir, sess.graph)
        config = projector.ProjectorConfig()
        embedding = config.embeddings.add()
        embedding.tensor_name = embed_node.name
        embedding.metadata_path = os.path.join('vectorizer', 'metadata.tsv')
        projector.visualize_embeddings(writer, config)

    sess.run(tf.global_variables_initializer())

    checkfile = os.path.join(logdir, 'ast2vec.ckpt')

    embed_file = open(outfile, 'wb')
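    # each pickle.dump below appends another snapshot to this file; read them back with repeated pickle.load calls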

    step = 0
    for epoch in range(1, epochs + 1):
        sample_gen = sampling.batch_samples_for_fast(samples, BATCH_SIZE)
        for batch in sample_gen:
            input_batch, label_batch = batch
            # print label_batch
            _, summary, embed, err = sess.run(
                [train_step, summaries, embed_node, loss_node],
                feed_dict={
                    input_node: input_batch,
                    label_node: label_batch
                })

            # print('Epoch: ', epoch, 'Loss: ', err)
            writer.add_summary(summary, step)
            if step % CHECKPOINT_EVERY == 0:
                # save state so we can resume later
                saver.save(sess, checkfile, step)
                print('Checkpoint saved, epoch: ' + str(epoch) + ', step: ' +
                      str(step) + ', loss:' + str(err) + '.')
                # save embeddings
                pickle.dump((embed, NODE_MAP), embed_file)
            step += 1

    # save embeddings and the mapping
    pickle.dump((embed, NODE_MAP), embed_file)
    embed_file.close()
    saver.save(sess, checkfile, step)
Example #6
def train_model(train_trees, val_trees, labels, embeddings, embedding_lookup,
                opt):
    """Train a classifier to label ASTs"""

    logdir = opt.model_path
    batch_size = opt.train_batch_size
    epochs = opt.niter
    num_feats = len(embeddings[0])

    random.shuffle(train_trees)
    random.shuffle(val_trees)

    nodes_node, children_node, hidden_node, attention_score_node = network.init_net(
        num_feats, len(labels), opt.aggregation)
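    # give the hidden tensor an explicit name so it can be fetched from a restored graph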
    hidden_node = tf.identity(hidden_node, name="hidden_node")

    out_node = network.out_layer(hidden_node)
    labels_node, loss_node = network.loss_layer(hidden_node, len(labels))

    optimizer = tf.train.AdamOptimizer(LEARN_RATE)
    train_step = optimizer.minimize(loss_node)

    ### init the graph
    sess = tf.Session()  # config=tf.ConfigProto(device_count={'GPU': 0})
    sess.run(tf.global_variables_initializer())

    with tf.name_scope('saver'):
        saver = tf.train.Saver()
        ckpt = tf.train.get_checkpoint_state(logdir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Continue training with old model")
            saver.restore(sess, ckpt.model_checkpoint_path)
            for i, var in enumerate(saver._var_list):
                print('Var {}: {}'.format(i, var))

    checkfile = os.path.join(logdir, 'cnn_tree.ckpt')

    print("Begin training..........")
    num_batches = len(train_trees) // batch_size + (
        1 if len(train_trees) % batch_size != 0 else 0)
    for epoch in range(1, epochs + 1):
        for i, batch in enumerate(
                sampling.batch_samples(
                    sampling.gen_samples(train_trees, labels, embeddings,
                                         embedding_lookup), batch_size)):
            nodes, children, batch_labels = batch
            step = (epoch - 1) * num_batches + i  # global batch counter

            if not nodes:
                continue  # don't try to train on an empty batch
            # print(batch_labels)
            _, err, out = sess.run(
                [train_step, loss_node, out_node],
                feed_dict={
                    nodes_node: nodes,
                    children_node: children,
                    labels_node: batch_labels
                })

            print('Epoch:', epoch, 'Step:', step, 'Loss:', err, 'Max nodes:',
                  len(nodes[0]))
            # print(attention_score[0])
            # print(len(attention_score[0]))
            # print(pooling_output.shape)

            if step % CHECKPOINT_EVERY == 0:
                # save state so we can resume later
                saver.save(sess, checkfile)
                # shutil.rmtree(savedmodel_path)

                print('Checkpoint saved, epoch:' + str(epoch) + ', step: ' +
                      str(step) + ', loss: ' + str(err) + '.')

        correct_labels = []
        predictions = []
        for batch in sampling.batch_samples(
                sampling.gen_samples(val_trees, labels, embeddings,
                                     embedding_lookup), 1):
            nodes, children, batch_labels = batch
            output = sess.run([out_node],
                              feed_dict={
                                  nodes_node: nodes,
                                  children_node: children,
                              })
            # print(output)
            correct_labels.append(np.argmax(batch_labels))
            predictions.append(np.argmax(output))

        target_names = list(labels)
        print('Accuracy:', accuracy_score(correct_labels, predictions))
        print(
            classification_report(correct_labels,
                                  predictions,
                                  target_names=target_names))
        print(confusion_matrix(correct_labels, predictions))

    print("Finish all iters, storring the whole model..........")
    saver.save(sess, checkfile)
Example #7
def main(opt):

    target_directory = "live_test/github_java/sort_function/"
    file_name = aggregation_name + "_" + distributed_function_name + "_function.csv"

    print("Loading embeddings....")
    with open(opt.embeddings_directory, 'rb') as fh:
        embeddings, embed_lookup = pickle.load(fh, encoding='latin1')
    labels = [str(i) for i in range(1, opt.n_classes + 1)]
    logdir = opt.model_path
    batch_size = opt.test_batch_size
    epochs = opt.niter
    node_embedding_size = len(embeddings[0])

    # Loading program file
    # test_trees, node_ids, node_types, subtree_ids, pkl_path = load_program(opt)

    # Init model
    checkfile = os.path.join(logdir, 'cnn_tree.ckpt')
    ckpt = tf.train.get_checkpoint_state(logdir)
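    # w_t / w_l / w_r below are presumably the "top"/"left"/"right" weight matrices of the
    # tree-based convolution, and w_attention scores nodes for attention-weighted aggregation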
    
    initializer = tf.contrib.layers.xavier_initializer()
    weights = {
        "w_t": tf.Variable(initializer([node_embedding_size, opt.feature_size]), name="w_t"),
        "w_l": tf.Variable(initializer([node_embedding_size, opt.feature_size]), name="w_l"),
        "w_r": tf.Variable(initializer([node_embedding_size, opt.feature_size]), name="w_r"),
        "w_attention": tf.Variable(initializer([opt.feature_size, 1]), name="w_attention")
    }

    biases = {
        "b_conv": tf.Variable(initializer([opt.feature_size]), name="b_conv"),
    }

    nodes_node, children_node, hidden_node, attention_score_node = network.init_net(
        node_embedding_size,
        len(labels),
        opt.feature_size,
        weights,
        biases,
        opt.aggregation,
        opt.distributed_function
    )
   
    out_node = network.out_layer(hidden_node)
    labels_node, loss_node = network.loss_layer(hidden_node, len(labels))

    optimizer = tf.train.AdamOptimizer(LEARN_RATE)
    train_step = optimizer.minimize(loss_node)
    
    saver = tf.train.Saver(save_relative_paths=True, max_to_keep=5)
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
        sess.run(init)
        ckpt = tf.train.get_checkpoint_state(logdir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Continue training with old model")
            print("Checkpoint path : " + str(ckpt.model_checkpoint_path))
            saver.restore(sess, ckpt.model_checkpoint_path)
            for i, var in enumerate(saver._var_list):
                print('Var {}: {}'.format(i, var))

        test_trees, node_ids, node_types, pkl_path, pb_path = load_program(opt.test_file)

        attention_score_scaled_map = predict(sess, out_node, attention_score_node,
                                             nodes_node, children_node, pkl_path, pb_path,
                                             test_trees, labels, node_ids, node_types,
                                             embeddings, embed_lookup)

        attention_path = opt.test_file.split(".")[0] + ".csv"

        if os.path.exists(attention_path):
            os.remove(attention_path)
        with open(attention_path,"a") as f:
           for k, v in attention_score_scaled_map.items():
                f.write(str(k) + "," + str(v))
                f.write("\n")


        generate_visualization(pb_path, attention_path)
Example #8
if __name__ == '__main__':
    opt = gather_options()
    print_options(opt)

    device = torch.device('cuda:{}'.format(
        opt.gpu_ids[0])) if opt.gpu_ids else torch.device('cpu')

    trainloader, testloader = loadData(opt)
    dataset_size = len(trainloader)
    print('#training images = %d' % dataset_size)

    net = Resnet(opt.input_nc,
                 num_classes=opt.num_classes,
                 norm=opt.norm,
                 nl=opt.nl)
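    # init_net applies the requested weight initialization and, given gpu_ids, moves the model onto the GPU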
    net = init_net(net, init_type='normal', gpu_ids=[0])

    if opt.continue_train:
        load_networks(opt, net)

    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = torch.optim.SGD(net.parameters(), lr=opt.lr, momentum=0.9)
    scheduler = get_scheduler(optimizer, opt)

    iter = 0  # iteration counter (note: shadows the builtin iter)
    running_loss = 0.0
    correct = 0.0
    total = 0

    writer = SummaryWriter()
    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        ...  # (training loop body truncated in the source snippet)
Example #9
def train_model(logdir,
                infile,
                embedfile,
                epochs=EPOCHS,
                training="True",
                testing="True"):
    """Train a classifier to label ASTs"""

    print("Loading trees...")
    with open(infile, 'rb') as fh:
        trees, test_trees, labels = pickle.load(fh)

        random.shuffle(trees)

    print(labels)
    print("Loading embeddings....")
    with open(embedfile, 'rb') as fh:
        embeddings, embed_lookup = pickle.load(fh)
        num_feats = len(embeddings[0])

    # build the inputs and outputs of the network
    nodes_node, children_node, hidden_node = network.init_net(
        num_feats, len(labels))

    out_node = network.out_layer(hidden_node)
    labels_node, loss_node = network.loss_layer(hidden_node, len(labels))

    optimizer = tf.train.AdamOptimizer(LEARN_RATE)
    train_step = optimizer.minimize(loss_node)

    tf.summary.scalar('loss', loss_node)

    ### init the graph
    sess = tf.Session()  # config=tf.ConfigProto(device_count={'GPU': 0})
    sess.run(tf.global_variables_initializer())

    with tf.name_scope('saver'):
        saver = tf.train.Saver()
        summaries = tf.summary.merge_all()
        writer = tf.summary.FileWriter(logdir, sess.graph)
        ckpt = tf.train.get_checkpoint_state(logdir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Continue training with old model")
            saver.restore(sess, ckpt.model_checkpoint_path)

    checkfile = os.path.join(logdir, 'cnn_tree.ckpt')
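    # "training"/"testing" arrive as the strings "True"/"False", hence the string comparisons below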

    if training == "True":
        print("Begin training..........")
        num_batches = len(trees) // BATCH_SIZE + (
            1 if len(trees) % BATCH_SIZE != 0 else 0)
        for epoch in range(1, epochs + 1):
            for i, batch in enumerate(
                    sampling.batch_samples(
                        sampling.gen_samples(trees, labels, embeddings,
                                             embed_lookup), BATCH_SIZE)):
                nodes, children, batch_labels = batch
                step = (epoch - 1) * num_batches + i  # global batch counter

                if not nodes:
                    continue  # don't try to train on an empty batch
                # print(batch_labels)
                _, summary, err, out = sess.run(
                    [train_step, summaries, loss_node, out_node],
                    feed_dict={
                        nodes_node: nodes,
                        children_node: children,
                        labels_node: batch_labels
                    })

                print('Epoch:', epoch, 'Step:', step, 'Loss:', err,
                      'Max nodes:', len(nodes[0]))

                writer.add_summary(summary, step)
                if step % CHECKPOINT_EVERY == 0:
                    # save state so we can resume later
                    saver.save(sess, checkfile, step)
                    print('Checkpoint saved, epoch:' + str(epoch) +
                          ', step: ' + str(step) + ', loss: ' + str(err) + '.')

        saver.save(sess, checkfile, step)

    # compute accuracy on the held-out test trees
    if testing == "True":
        correct_labels = []
        predictions = []
        print('Computing test accuracy...')
        for batch in sampling.batch_samples(
                sampling.gen_samples(test_trees, labels, embeddings,
                                     embed_lookup), 1):
            nodes, children, batch_labels = batch
            output = sess.run([out_node],
                              feed_dict={
                                  nodes_node: nodes,
                                  children_node: children,
                              })
            #print(output)
            correct_labels.append(np.argmax(batch_labels))
            predictions.append(np.argmax(output))

        target_names = list(labels)
        print('Accuracy:', accuracy_score(correct_labels, predictions))
        print(
            classification_report(correct_labels,
                                  predictions,
                                  target_names=target_names))
        print(confusion_matrix(correct_labels, predictions))
Example #10
def test_model(test_trees, labels, embeddings, embedding_lookup, opt):
    logdir = opt.model_path
    batch_size = opt.train_batch_size
    epochs = opt.niter
    node_embedding_size = len(embeddings[0])

    random.shuffle(test_trees)

    checkfile = os.path.join(logdir, 'cnn_tree.ckpt')
    ckpt = tf.train.get_checkpoint_state(logdir)

    initializer = tf.contrib.layers.xavier_initializer()
    weights = {
        "w_t": tf.Variable(initializer([node_embedding_size, opt.feature_size]), name="w_t"),
        "w_l": tf.Variable(initializer([node_embedding_size, opt.feature_size]), name="w_l"),
        "w_r": tf.Variable(initializer([node_embedding_size, opt.feature_size]), name="w_r"),
        "w_attention": tf.Variable(initializer([opt.feature_size, 1]), name="w_attention")
    }

    biases = {
        "b_conv": tf.Variable(initializer([opt.feature_size]), name="b_conv"),
    }

    nodes_node, children_node, hidden_node, attention_score_node = network.init_net(
        node_embedding_size, len(labels), opt.feature_size, weights, biases,
        opt.aggregation, opt.distributed_function)

    out_node = network.out_layer(hidden_node)
    labels_node, loss_node = network.loss_layer(hidden_node, len(labels))

    optimizer = tf.train.AdamOptimizer(LEARN_RATE)
    train_step = optimizer.minimize(loss_node)

    saver = tf.train.Saver(save_relative_paths=True, max_to_keep=5)
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()

    with tf.Session() as sess:
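        # initialize fresh variables first, then overwrite them from the checkpoint if one exists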
        sess.run(init)
        ckpt = tf.train.get_checkpoint_state(logdir)
        if ckpt and ckpt.model_checkpoint_path:
            print("Continue training with old model")
            print("Checkpoint path : " + str(ckpt.model_checkpoint_path))
            saver.restore(sess, ckpt.model_checkpoint_path)

        correct_labels = []
        predictions = []
        print('Computing test accuracy...')
        for batch in sampling.batch_samples(
                sampling.gen_samples(test_trees, labels, embeddings,
                                     embedding_lookup), 1):
            nodes, children, batch_labels = batch
            output = sess.run([out_node],
                              feed_dict={
                                  nodes_node: nodes,
                                  children_node: children,
                              })

            # print(attention_score[0])
            # print(len(attention_score[0]))

            # print(output)
            correct_labels.append(np.argmax(batch_labels))
            predictions.append(np.argmax(output))

        target_names = list(labels)
        print('Accuracy:', accuracy_score(correct_labels, predictions))
        print(
            classification_report(correct_labels,
                                  predictions,
                                  target_names=target_names))
        print(confusion_matrix(correct_labels, predictions))
Example #11
def train_model(train_trees, val_trees, labels, embeddings, embedding_lookup,
                opt):
    """Train a classifier to label ASTs"""

    logdir = opt.model_path
    batch_size = opt.train_batch_size
    epochs = opt.niter
    node_embedding_size = len(embeddings[0])

    random.shuffle(train_trees)
    random.shuffle(val_trees)
    # random.shuffle(test_trees)

    checkfile = os.path.join(logdir, 'cnn_tree.ckpt')
    ckpt = tf.train.get_checkpoint_state(logdir)

    initializer = tf.contrib.layers.xavier_initializer()
    weights = {
        "w_t": tf.Variable(initializer([node_embedding_size, opt.feature_size]), name="w_t"),
        "w_l": tf.Variable(initializer([node_embedding_size, opt.feature_size]), name="w_l"),
        "w_r": tf.Variable(initializer([node_embedding_size, opt.feature_size]), name="w_r"),
        "w_attention": tf.Variable(initializer([opt.feature_size, 1]), name="w_attention")
    }

    biases = {
        "b_conv": tf.Variable(initializer([opt.feature_size]), name="b_conv"),
    }

    nodes_node, children_node, hidden_node, attention_score_node = network.init_net(
        node_embedding_size, len(labels), opt.feature_size, weights, biases,
        opt.aggregation, opt.distributed_function)

    out_node = network.out_layer(hidden_node)
    labels_node, loss_node = network.loss_layer(hidden_node, len(labels))

    optimizer = tf.train.AdamOptimizer(LEARN_RATE)
    train_step = optimizer.minimize(loss_node)

    saver = tf.train.Saver(save_relative_paths=True, max_to_keep=5)
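    # save_relative_paths=True keeps checkpoint metadata portable if logdir is moved;
    # max_to_keep=5 retains only the five most recent checkpoints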
    # Initialize the variables (i.e. assign their default value)
    init = tf.global_variables_initializer()

    if opt.training:
        print("Begin training..........")

        with tf.Session() as sess:

            sess.run(init)

            if ckpt and ckpt.model_checkpoint_path:
                print("Continue training with old model")
                print("Checkpoint path : " + str(ckpt.model_checkpoint_path))
                saver.restore(sess, ckpt.model_checkpoint_path)
                for i, var in enumerate(saver._var_list):
                    print('Var {}: {}'.format(i, var))

            # saved_model.loader.load(sess, [tag_constants.TRAINING], savedmodel_path)

            num_batches = len(train_trees) // batch_size + (
                1 if len(train_trees) % batch_size != 0 else 0)
            for epoch in range(1, epochs + 1):
                for i, batch in enumerate(
                        sampling.batch_samples(
                            sampling.gen_samples(train_trees, labels,
                                                 embeddings, embedding_lookup),
                            batch_size)):
                    nodes, children, batch_labels = batch
                    # print(len(batch_labels))
                    # print(len(batch_labels[0]))
                    step = (epoch - 1) * num_batches + i  # global batch counter

                    if not nodes:
                        continue  # don't try to train on an empty batch
                    # print(batch_labels)
                    _, err, out = sess.run(
                        [train_step, loss_node, out_node],
                        feed_dict={
                            nodes_node: nodes,
                            children_node: children,
                            labels_node: batch_labels
                        })

                    print('Epoch:', epoch, 'Step:', step, 'Loss:', err,
                          'Max nodes:', len(nodes[0]))

                    if step % CHECKPOINT_EVERY == 0:
                        # save state so we can resume later
                        saver.save(sess, checkfile)
                        # shutil.rmtree(savedmodel_path)

                        print('Checkpoint saved, epoch:' + str(epoch) +
                              ', step: ' + str(step) + ', loss: ' + str(err) +
                              '.')

                correct_labels = []
                predictions = []
                for batch in sampling.batch_samples(
                        sampling.gen_samples(val_trees, labels, embeddings,
                                             embedding_lookup), 1):
                    nodes, children, batch_labels = batch
                    output = sess.run([out_node],
                                      feed_dict={
                                          nodes_node: nodes,
                                          children_node: children,
                                      })
                    # print(output)
                    correct_labels.append(np.argmax(batch_labels))
                    predictions.append(np.argmax(output))

                target_names = list(labels)
                print('Accuracy:', accuracy_score(correct_labels, predictions))
                print(
                    classification_report(correct_labels,
                                          predictions,
                                          target_names=target_names))
                print(confusion_matrix(correct_labels, predictions))

            print("Finish all iters, storring the whole model..........")
            saver.save(sess, checkfile)
Example #12
    """
    for i in range(len(squares)):
        x, y, r = squares[i]
        if (len(drawed_s) > i):
            canvas.coords(drawed_s[i], x - r, y - r, x + r, y + r)
        else:
            drawed_s.append(canvas.create_rectangle(x - r, y - r, x + r, y + r))
    """

    canvas.pack()


def idle_func():
    global color_i
    color_i += 1
    color_i %= 5
    circles, squares = network.get_objs()
    draw(canvas, circles, squares)
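    # reschedule this callback in 20 ms, i.e. roughly 50 redraws per second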
    canvas.after(20, idle_func)

network.init_net()
tk = tkinter.Tk()
canvas = tkinter.Canvas(tk)

canvas.pack()
idle_func()
tk.mainloop()
print(1)


Example #13
    def __init__(self, opt, gpu_ids=[0], continue_run=None):
        self.opt = opt
        self.kt = 0
        self.lamk = 0.001
        self.lambdaImg = 100
        self.lambdaGan = 1.0
        self.model_names = ['netD', 'netG']
        self.gpu_ids = gpu_ids

        if not continue_run:
            expname = '-'.join([
                'b_' + str(self.opt.batchSize), 'ngf_' + str(self.opt.ngf),
                'ndf_' + str(self.opt.ndf), 'gm_' + str(self.opt.gamma)
            ])
            self.rundir = (self.opt.rundir + '/pix2pixBEGAN-' +
                           datetime.now().strftime('%B%d-%H-%M-%S') +
                           expname + self.opt.comment)
            if not os.path.isdir(self.rundir):
                os.mkdir(self.rundir)
            with open(self.rundir + '/options.pkl', 'wb') as file:
                pickle.dump(opt, file)
        else:
            self.rundir = continue_run
            if os.path.isfile(self.rundir + '/options.pkl'):
                with open(self.rundir + '/options.pkl', 'rb') as file:
                    tmp = opt.rundir
                    tmp_lr = opt.lr
                    self.opt = pickle.load(file)
                    self.opt.rundir = tmp
                    self.opt.lr = tmp_lr

        self.netG = UnetGenerator(input_nc=3,
                                  output_nc=3,
                                  num_downs=7,
                                  ngf=self.opt.ngf,
                                  norm_layer=nn.BatchNorm2d,
                                  use_dropout=True)
        self.netD = UnetDescriminator(input_nc=3,
                                      output_nc=3,
                                      num_downs=7,
                                      ngf=self.opt.ndf,
                                      norm_layer=nn.BatchNorm2d,
                                      use_dropout=True)

        # Decide which device we want to run on
        self.device = torch.device("cuda:0" if (
            torch.cuda.is_available()) else "cpu")

        init_net(self.netG, 'normal', 0.002, [0])
        init_net(self.netD, 'normal', 0.002, [0])

        self.netG.to(self.device)
        self.netD.to(self.device)
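        # pool of previously generated images (as in pix2pix/CycleGAN) used to stabilize discriminator updates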
        self.imagePool = ImagePool(pool_size)

        self.criterionL1 = torch.nn.L1Loss()

        if continue_run:
            self.load_networks('latest')

        self.writer = Logger(self.rundir)
        self.start_step, self.opt.lr = self.writer.get_latest(
            'misc/lr', self.opt.lr)

        # initialize optimizers
        self.optimG = torch.optim.Adam(self.netG.parameters(),
                                       lr=self.opt.lr,
                                       betas=(beta1, 0.999))
        self.optimD = torch.optim.Adam(self.netD.parameters(),
                                       lr=self.opt.lr,
                                       betas=(beta1, 0.999))