Example #1
def main(args):
    # Filepaths
    config_file = utils.data("wireframe.yaml")
    model_file = utils.data("pretrained_lcnn.pth.tar")

    w = wireframe.Wireframe(config_file, model_file, "")
    if not w.setup():
        print(w.error)
    else:
        print("w is setup successfully")

    save_wireframe_records(args.project_directory, w)
Example #2
    def __init__(self, num_steps, model_load_path, num_test_rec):
        """
        Initializes the Adversarial Video Generation Runner.

        @param num_steps: The number of training steps to run.
        @param model_load_path: The path from which to load a previously-saved model.
                                Default = None.
        @param num_test_rec: The number of recursive generations to produce when testing. Recursive
                             generations use previous generations as input to predict further into
                             the future.
        """

        self.global_step = 0
        self.num_steps = num_steps
        self.num_test_rec = num_test_rec

        self.sess = tf.Session()
        self.summary_writer = tf.summary.FileWriter(c.SUMMARY_SAVE_DIR,
                                                    graph=self.sess.graph,
                                                    flush_secs=30)

        # Init data collection
        print('Init data...')
        self.train_data = data(c.TRAIN_DIR)
        self.test_data = data(c.TEST_DIR)

        if c.ADVERSARIAL:
            print('Init discriminator...')
            self.d_model = DiscriminatorModel(self.sess, self.summary_writer,
                                              c.TRAIN_HEIGHT, c.TRAIN_WIDTH,
                                              c.SCALE_CONV_FMS_D,
                                              c.SCALE_KERNEL_SIZES_D,
                                              c.SCALE_FC_LAYER_SIZES_D)

        print('Init generator...')
        self.g_model = GeneratorModel(self.sess, self.summary_writer,
                                      c.TRAIN_HEIGHT, c.TRAIN_WIDTH,
                                      c.FULL_HEIGHT, c.FULL_WIDTH,
                                      c.SCALE_FMS_G, c.SCALE_KERNEL_SIZES_G)

        print('Init variables...')
        self.saver = tf.train.Saver(keep_checkpoint_every_n_hours=2)
        self.sess.run(tf.global_variables_initializer())

        # if load path specified, load a saved model
        if model_load_path is not None:
            self.saver.restore(self.sess, model_load_path)
            print('Model restored from ' + model_load_path)
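
# The num_test_rec parameter above drives "recursive generation": each new
# prediction is appended to the input history and used to predict further
# into the future. A minimal, framework-free sketch of that idea follows;
# predict_next is a placeholder, not the actual GeneratorModel.
import numpy as np

def predict_next(history):
    # Stand-in single-step predictor.
    return history[-1] * 0.9 + 0.1

def recursive_generate(seed_frames, num_test_rec):
    """Roll the predictor forward num_test_rec steps, feeding each
    prediction back in as input for the next step."""
    history = list(seed_frames)
    generated = []
    for _ in range(num_test_rec):
        nxt = predict_next(history)
        generated.append(nxt)
        history.append(nxt)  # previous generations become future input
    return generated

print(recursive_generate([np.ones((2, 2))], num_test_rec=3))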
Example #3
def getLabel():
    # Returns the label (an int value) as a string
    if request.method == 'GET':
        conn = sqlite3.connect("/root/POSCHAIR.db")
        c = conn.cursor()

        d = data()
        c.execute("SELECT init_pos_lower FROM User WHERE ID = ?", ("*****@*****.**",))
        lower_origin = c.fetchone()[0]
        #print(lower_origin)

        lower_origin_list = json.loads(lower_origin)

        c.execute("SELECT total_time FROM Keyword WHERE ID = ?", ("*****@*****.**",))
        total_hour = c.fetchone()[0]
        #print(total_hour)

        '''Fetch lower_median from the DB'''
        c.execute("SELECT lower_median FROM Median WHERE ID = ?", ("*****@*****.**",))
        lower_median = c.fetchone()[0]
        lower_median_list = json.loads(lower_median)
        c.execute("SELECT upper_median FROM Median WHERE ID = ?", ("*****@*****.**",))
        upper_median = c.fetchone()[0]
        upper_median_list = json.loads(upper_median)

        label = 0
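
# The handler above reads JSON-encoded columns via parameterized SQLite
# queries. A self-contained sketch of that pattern against an in-memory
# database; the table and column here are illustrative, not the actual
# POSCHAIR.db schema.
import json
import sqlite3

conn = sqlite3.connect(":memory:")
c = conn.cursor()
c.execute("CREATE TABLE Median (ID TEXT, lower_median TEXT)")
c.execute("INSERT INTO Median VALUES (?, ?)",
          ("user@example.com", json.dumps([1, 2, 3])))
conn.commit()

# Parameterized SELECT, then decode the JSON-encoded column.
c.execute("SELECT lower_median FROM Median WHERE ID = ?", ("user@example.com",))
lower_median_list = json.loads(c.fetchone()[0])
print(lower_median_list)
conn.close()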
Example #4
    def getframes(self, videopath):
        frames = getXorY(videopath, self.sequence_step, self.train)
        stacks = []
        for frame in frames.frames:
            stacks.append(self.transform(Image.open(frame)))
        X = torch.stack(stacks)
        return data(frames=X, label=frames.label)
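
# getframes stacks per-frame tensors into one input tensor. A minimal sketch
# of that stacking step with dummy in-memory images; Sample stands in for
# whatever record type data() returns, and getXorY is not reproduced here.
from collections import namedtuple

import torch
from PIL import Image
from torchvision import transforms

Sample = namedtuple("Sample", ["frames", "label"])
transform = transforms.ToTensor()

# Dummy 8x8 RGB frames standing in for the decoded video frames.
dummy_frames = [Image.new("RGB", (8, 8), color=(i * 30, 0, 0)) for i in range(4)]
X = torch.stack([transform(f) for f in dummy_frames])  # shape: (4, 3, 8, 8)
print(Sample(frames=X, label=0).frames.shape)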
Example #5
def run():
  size_latent = 256
  X, input_shape = data()
  discriminator = Discriminator(input_shape).model
  generator = Generator(size_latent, input_shape).model
  gan = GAN(generator, discriminator).model
  train(X, generator, discriminator, gan, size_latent)
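
# A generic sketch of how train() in a setup like this typically alternates
# between discriminator and generator updates. The tiny Dense models and the
# random data below are stand-ins, not the project's Generator/Discriminator/
# GAN classes.
import numpy as np
from tensorflow import keras
from tensorflow.keras import layers

latent_dim, data_dim = 8, 4

generator = keras.Sequential([layers.Dense(16, activation="relu", input_shape=(latent_dim,)),
                              layers.Dense(data_dim)])
discriminator = keras.Sequential([layers.Dense(16, activation="relu", input_shape=(data_dim,)),
                                  layers.Dense(1, activation="sigmoid")])
discriminator.compile(optimizer="adam", loss="binary_crossentropy")

discriminator.trainable = False          # frozen inside the combined model
gan = keras.Sequential([generator, discriminator])
gan.compile(optimizer="adam", loss="binary_crossentropy")

X = np.random.randn(64, data_dim).astype("float32")   # stand-in for data()
for step in range(3):
    # 1) Update the discriminator on a mix of real and generated samples.
    discriminator.trainable = True
    z = np.random.randn(32, latent_dim).astype("float32")
    fake = generator.predict(z, verbose=0)
    real = X[np.random.randint(0, len(X), 32)]
    d_loss = discriminator.train_on_batch(np.vstack([real, fake]),
                                          np.vstack([np.ones((32, 1)), np.zeros((32, 1))]))
    # 2) Update the generator through the frozen discriminator.
    discriminator.trainable = False
    g_loss = gan.train_on_batch(np.random.randn(32, latent_dim).astype("float32"),
                                np.ones((32, 1)))
    print(step, d_loss, g_loss)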
Example #6
def main(args):
    colorizer = Colorizer()

    X = utils.data(size=(64, 64), count=5000, path=args.data_path)

    np.random.shuffle(X)
    y = np.copy(X)
    X = np.expand_dims(np.mean(X, axis=1), axis=1)
    X = np.repeat(X, repeats=3, axis=1)
    print(X.shape)

    for i, j in colorizer.train(X, y, epochs=50, batch_size=32):
        sample = X[:100]
        osample = y[:100]
        predicted = colorizer.predict(sample)
        img1 = utils.arrange_images(sample)
        img2 = utils.arrange_images(predicted)
        img3 = utils.arrange_images(osample)
        cv2.imshow('f1', img1)
        cv2.imshow('f2', img2)
        cv2.imshow('f3', img3)
        cv2.waitKey(10)

    colorizer.save(args.model_path)
    colorizer.load(args.model_path)

    X = utils.data(size=(64, 64), count=8000, path=args.data_path)

    X = np.expand_dims(np.mean(X, axis=1), axis=1)
    X = np.repeat(X, repeats=3, axis=1)
    print(X.shape)

    for i in range(100):
        sample = X[i * 100:(i + 1) * 100]
        predicted = colorizer.predict(sample)
        img1 = utils.arrange_images(sample)
        img2 = utils.arrange_images(predicted)
        cv2.imshow('f1', img1)
        cv2.imshow('f2', img2)
        cv2.waitKey(0)
Example #7
def main(args):
    try:
        images, reconstruction = setup_project_data(args.project_directory)
    except Exception as e:
        print("An exception occurred while trying to load project data: {}".format(e))
        return

    # Filepaths
    config_file = utils.data("wireframe.yaml")
    model_file = utils.data("pretrained_lcnn.pth.tar")

    w = wireframe.Wireframe(config_file, model_file, args.device)
    if not w.setup():
        print(w.error)
    else:
        print("w is setup successfully")

    records = wireframe.project.generate_wireframe_records(args.project_directory, w, force=args.recompute)

    if args.reconstruction >= 0:
        reconstruction = [reconstruction[args.reconstruction]]

    wpcs = []

    for r in reconstruction:
        for imname, iminfo in r['shots'].items():
            print("Processing {}...".format(imname))
            wpc = wireframe.WireframePointCloud(args.project_directory,
                    imname,
                    records[imname],
                    iminfo,
                    r['cameras'],
                    line_inlier_thresh=args.l_thresh,
                    color_inliers=args.color_inliers,
                    threshold=args.score_thresh)
            wpcs.append(wpc)
            wpc.write_line_point_clouds()

    return wpcs, records
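
# A hedged sketch of the command-line entry point this main() appears to
# expect. The flag names follow the attributes used above; the defaults and
# help strings are illustrative assumptions, not the project's actual CLI.
import argparse

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("project_directory", help="project directory containing the reconstruction data")
    parser.add_argument("--device", default="", help="device string passed to Wireframe")
    parser.add_argument("--recompute", action="store_true", help="force regeneration of wireframe records")
    parser.add_argument("--reconstruction", type=int, default=-1,
                        help="index of a single reconstruction to process (-1 = all)")
    parser.add_argument("--l_thresh", type=float, default=0.5, help="line inlier threshold")
    parser.add_argument("--color_inliers", action="store_true", help="color inlier points in the output clouds")
    parser.add_argument("--score_thresh", type=float, default=0.5, help="wireframe score threshold")
    main(parser.parse_args())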
Example #8
def main(args):
    # Filepaths
    config_file = utils.data("wireframe.yaml")
    model_file = utils.data("pretrained_lcnn.pth.tar")

    w = wireframe.Wireframe(config_file, model_file, "")
    if not w.setup():
        print(w.error)
    else:
        print("w is setup successfully")

    # Controls how small the minimum connected component can be
    desired_edges = 2

    for imname in args.image:
        im, g, subgraphs = w.get_filtered_subgraphs(imname, desired_edges)
        g.plot_graph(g.g, im)
        print("\n\nFound {} subgraphs".format(len(subgraphs)))

        for s in subgraphs:
            print("\n\n===================\n\n")
            g.plot_graph(s, im)

        print("\n\nReduced subgraphs to {} graphs".format(len(subgraphs)))
    organisms = ["yeast", "coli", "melanogaster", "human"]
    ppis = ["biogrid", "string", "dip"]

    info = {}

    for organism in organisms:
        for ppi in ppis:
            args = dict(organism=organism,
                        ppi=ppi,
                        expression=True,
                        orthologs=True,
                        sublocalizations=True if organism != 'coli' else False,
                        string_thr=500)

            # Getting the data ----------------------------------
            (edges, edge_weights), X, train_ds, test_ds, genes = data(**args)
            print('Fetched data', ppi, organism)
            # ---------------------------------------------------

            n_labels = len(train_ds) + len(test_ds)
            n_positives = (test_ds[:, 1] == 1).sum() + (train_ds[:, 1]
                                                        == 1).sum()
            n_negatives = (test_ds[:, 1] == 0).sum() + (train_ds[:, 1]
                                                        == 0).sum()
            assert n_labels == n_positives + n_negatives

            key = f'{organism}_{ppi}'
            value = dict(
                number_of_genes=len(genes),
                number_of_genes_ppi=len(np.unique(edges)),
                number_of_edges_ppi=len(edges),
Example #10
    j -= 1
    
    if i == rgt and j == lft:
      lft += 1
      rgt -= 1
      maxim = max(maxim, arr[i])
      maximFound = 1
      sorted = 1 if check else 0
      check = 1
      i, j = j, i
      k += 1

  print(k, arr)

# bubbleSortShrinkingInterval([1, 18, 2, 27, 33, 3, 66, -1, 15 ])
bubbleSortShrinkingInterval(data()) # 247 loops for 1K items, 2534 loops for 10K items

# Bubble sort with two opposite pointers
def bubbleSortOppositePointers(arr):
  i, j, k = 0, len(arr) - 1, 0
  sorted = 0
  maxim, maximFound = -math.inf, 0
  
  check = 1
  while not sorted or not maximFound:
    if (i + 1 < len(arr)) and (arr[i] > arr[i + 1]):
      arr[i], arr[i + 1] = arr[i + 1], arr[i]
      check = 0
    i += 1
    
    if j - 1 > 0 and arr[j - 1] > arr[j]:
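
# The snippet above is truncated at both ends. A self-contained sketch of the
# same "two opposite pointers" idea, written as a cocktail-shaker pass that
# bubbles the maximum right and the minimum left while the interval shrinks.
import random

def bubble_sort_opposite_pointers(arr):
    left, right = 0, len(arr) - 1
    loops = 0
    while left < right:
        swapped = False
        # Forward pass: push the largest remaining element toward `right`.
        for i in range(left, right):
            loops += 1
            if arr[i] > arr[i + 1]:
                arr[i], arr[i + 1] = arr[i + 1], arr[i]
                swapped = True
        right -= 1
        # Backward pass: push the smallest remaining element toward `left`.
        for j in range(right, left, -1):
            loops += 1
            if arr[j - 1] > arr[j]:
                arr[j - 1], arr[j] = arr[j], arr[j - 1]
                swapped = True
        left += 1
        if not swapped:  # already sorted
            break
    return loops

items = random.sample(range(1000), 100)
print(bubble_sort_opposite_pointers(items), items[:10])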
Example #11
    def __repr__(self):
        return "ec2 | %s | %s" % (prompt(self.region),
                                  data(self.instance_id or 'all instances')
                                  )
Example #12
    w6_hist = tf.summary.histogram("weights6", w6)
    hypothesis_hist = tf.summary.histogram("hypothesis", hypothesis)

with tf.Session() as sess:
    checkpoint_dir = './checkpoint'
    if not os.path.exists(checkpoint_dir):
        os.makedirs(checkpoint_dir)

    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver()

    if ckpt_load(sess, checkpoint_dir, saver):
        print(" [*] Load SUCCESS")
    else:
        print(" [!] Load failed...")

    t = threading.Thread(target=skeleton_socket)
    t.start()

    while (True):
        if (skeleton_data.ndim == 2 and skeleton_data.shape[0] == 30):
            m1_data = skeleton_data[:, 3:].reshape(-1, 30, 57)
            m2_data = data(skeleton_data).reshape(-1, 30, 9)
            predict = sess.run([hypothesis],
                               feed_dict={
                                   m1_X: m1_data,
                                   m2_X: m2_data
                               })
            print(np.argmax(predict))
Example #13
                    default=100)
parser.add_argument('--mu',
                    help='hyper-parameter for label propagation',
                    type=float,
                    default=10)
parser.add_argument('--keep_prob',
                    help='keep probability for dropout',
                    type=float,
                    default=1.0)
# In particular, $\alpha = (label_context_batch_size)/(graph_context_batch_size + label_context_batch_size)$.
# A larger value is assigned to graph_context_batch_size if graph structure is more informative than label information.

args = parser.parse_args()
print(args)

data = data(args)

print_every_k_iters = 1
start_time = time.time()

with tf.Session() as session:
    model = Model(args, data, session)
    iter_cnt = 0
    augument_size = int(len(model.label_x))
    while True:
        iter_cnt += 1
        curr_loss_label_propagation, curr_loss_u_1, curr_loss_u_2 = (0.0, 0.0,
                                                                     0.0)

        average_loss_u_1 = 0
        average_loss_u_2 = 0
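
# The comment near the top of this example defines the mixing ratio alpha in
# terms of the two context batch sizes. A one-line sketch of that relationship
# (the batch sizes below are illustrative):
def label_context_alpha(graph_context_batch_size, label_context_batch_size):
    return label_context_batch_size / (graph_context_batch_size + label_context_batch_size)

print(label_context_alpha(100, 100))  # equal batches -> alpha = 0.5
print(label_context_alpha(300, 100))  # larger graph-context batch -> alpha = 0.25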
Example #14
    def __repr__(self):
        return "lb | %s | %s" % (prompt(self.region),
                                 data(self.connected_to or 'Not connected')
                                 )
Example #15
    def __repr__(self):
        return "s3 | %s | %s" % (prompt(self.region),
                                 data(self.bucket or 'all instances')
                                 )
Example #16
# Define hyperparameters
parser = argparse.ArgumentParser()
# Local path
#parser.add_argument('--data_path', type=str, default="/Users/ren/Desktop/nlp相关/实验1/aclImdb/")  # data path
parser.add_argument('--data_path', type=str, default="data/")  # data path
parser.add_argument('--embed_size', type=int, default=300)  # embedding layer width
parser.add_argument('--hidden_size', type=int, default=128)
parser.add_argument('--seq_len', type=int, default=20)  # sequence length; inputs are truncated or padded to this
parser.add_argument('--batch_size', type=int, default=64)  # batch size
parser.add_argument('--bidirectional', type=bool, default=True)  # whether the RNN is bidirectional
parser.add_argument('--classification_num', type=int, default=4)  # number of classes
parser.add_argument('--lr', type=float, default=1e-3)  # learning rate
parser.add_argument('--dropout', type=float, default=0.5)  # dropout rate
parser.add_argument('--num_epochs', type=int, default=100)  # number of training epochs
parser.add_argument('--vocab_size', type=int, default=0)  # vocabulary size
parser.add_argument('--if_vail', type=bool, default=True)
parser.add_argument('--word2vec_path', type=str, default="/Users/ren/Desktop/nlp相关/glove_to_word2vec.txt")  # pretrained word-vector path
#parser.add_argument('--word2vec_path', type=str, default="/data/renhongjie/zouye1_new/data/glove_to_word2vec.txt")  # pretrained word-vector path
parser.add_argument('--save_path', type=str, default="best3.pth")  # model save path
parser.add_argument('--weight_decay', type=float, default=1e-4)  # weight decay
args = parser.parse_args()
if args.if_vail:
    train_iter, test_iter, vail_iter, weight = utils.data(args)
else:
    train_iter, test_iter, weight = utils.data(args)
net = model.ESIM(args, weight=weight)
if args.if_vail:
    train.train(args, device, net, train_iter, test_iter, vail_iter)
else:
    train.train(args, device, net, train_iter, test_iter, None)
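
# Note that argparse flags declared with type=bool (e.g. --bidirectional and
# --if_vail above) do not behave as on/off switches: bool("False") is True, so
# any non-empty value keeps the flag truthy. A minimal sketch of the usual fix:
import argparse

p = argparse.ArgumentParser()
p.add_argument('--bidirectional_buggy', type=bool, default=True)   # "--bidirectional_buggy False" still yields True
p.add_argument('--bidirectional', action='store_true')             # conventional boolean switch
demo = p.parse_args(['--bidirectional_buggy', 'False', '--bidirectional'])
print(demo.bidirectional_buggy, demo.bidirectional)  # True True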
Example #17
parser.add_argument('--data_path', type=str, default="/Users/ren/Desktop/nlp相关/实验1/aclImdb/")  # data path
#parser.add_argument('--data_path', type=str, default="/data/renhongjie/zouye1_new/data/aclImdb/")  # data path
parser.add_argument('--embed_size', type=int, default=300)  # embedding layer width
parser.add_argument('--num_hidens', type=int, default=100)
parser.add_argument('--seq_len', type=int, default=300)  # sequence length; inputs are truncated or padded to this
parser.add_argument('--batch_size', type=int, default=64)  # batch size
parser.add_argument('--bidirectional', type=bool, default=True)  # whether the RNN is bidirectional
parser.add_argument('--num_classes', type=int, default=2)  # number of classes
parser.add_argument('--lr', type=float, default=1e-4)  # learning rate
parser.add_argument('--droput', type=float, default=0.5)  # dropout rate
parser.add_argument('--num_epochs', type=int, default=10)  # number of training epochs
parser.add_argument('--vocab_size', type=int, default=0)  # vocabulary size
parser.add_argument('--save_path', type=str, default="best.pth")  # model save path
parser.add_argument('--CLS', type=str, default="[CLS]")  # [CLS] marker token
parser.add_argument('--PAD', type=str, default="[PAD]")  # [PAD] marker token
parser.add_argument('--weight_decay', type=float, default=1e-4)  # weight decay
args = parser.parse_args()
tokenizer = AutoTokenizer.from_pretrained("./bert-base-uncased")
model = AutoModel.from_pretrained("./bert-base-uncased")

parser.add_argument('--tokenizer', default=tokenizer)  # tokenizer instance
parser.add_argument('--bert_model', default=model)  # BERT model instance
parser.add_argument('--hidden_size', type=int, default=768)  # hidden size from the model config; 768 for bert-base
args = parser.parse_args()
train_iter, test_iter, vail_iter = utils.data(args)
# inputs = args.tokenizer("Hello world!", return_tensors="pt")
# outputs = args.bert_model(**inputs)
# print(outputs)
net = bert.Model(args)
net.to(device)
train.train(args, device, net, train_iter, test_iter, vail_iter)
Example #18
def evalDann():

    print("\nParameters:")
    for attr, value in sorted(FLAGS.__flags.items()):
        print("{}={}".format(attr.upper(), value))
    print("")

    x_test, y_test, v_test = data(["books"], "test", ".test.pickle")

    print("\nEvaluating...\n")
    # Evaluation
    # ==================================================
    checkpoint_file = tf.train.latest_checkpoint(FLAGS.checkpoint_dir)
    logging.info("############\nEvaluating for " + checkpoint_file)
    logging.info(FLAGS.checkpoint_dir)

    graph = tf.Graph()
    with graph.as_default():
        session_conf = tf.ConfigProto(
            allow_soft_placement=FLAGS.allow_soft_placement,
            log_device_placement=FLAGS.log_device_placement)
        sess = tf.Session(config=session_conf)
        with sess.as_default():
            # Load the saved meta graph and restore variables
            saver = tf.train.import_meta_graph(
                "{}.meta".format(checkpoint_file))
            saver.restore(sess, checkpoint_file)

            X = graph.get_operation_by_name("X").outputs[0]
            y = graph.get_operation_by_name("y").outputs[0]
            # domain = graph.get_operation_by_name("domain").outputs[0]
            train = graph.get_operation_by_name("train").outputs[0]
            predictions = graph.get_operation_by_name(
                "label_predictor/prediction").outputs[0]

            # Generate batches for one epoch
            batches_x = read.batch_iter(list(x_test),
                                        FLAGS.batch_size,
                                        1,
                                        shuffle=False)
            batches_y = read.batch_iter(list(y_test),
                                        FLAGS.batch_size,
                                        1,
                                        shuffle=False)
            # Collect the predictions here
            all_predictions = [[0, 0], [0, 0]]
            for x_test_batch, y_test_batch in zip(batches_x, batches_y):
                # test_x = np.vstack([x_test_batch])
                # test_y = np.vstack([y_test_batch])
                # batch_predictions = sess.run(predictions,{X: test_x,y :test_y,train :False})
                batch_predictions = sess.run(predictions, {
                    X: x_test_batch,
                    y: y_test_batch,
                    train: False
                })
                all_predictions = np.concatenate(
                    (all_predictions, batch_predictions), axis=0)

            all_predictions = all_predictions[2:]

    # Print accuracy if y_test is defined
    if y_test is not None:
        correct_predictions = compute_accuracy_count(all_predictions, y_test)
        logging.info("Total number of test examples: {}".format(len(y_test)))
        logging.info("Accuracy: {:g}".format(correct_predictions /
                                             float(len(y_test))))

    # Save the evaluation to a csv
    predictions_human_readable = np.column_stack(
        (np.array(x_test), all_predictions))
    out_path = os.path.join(FLAGS.checkpoint_dir, "..", "prediction.csv")
    logging.info("Saving evaluation to {0}".format(out_path))
    with open(out_path, 'w') as f:
        csv.writer(f).writerows(predictions_human_readable)
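
# read.batch_iter is not shown above. A minimal sketch of what such a batching
# helper typically looks like; the signature mirrors the calls above, but the
# implementation is an assumption, not the project's actual code.
import numpy as np

def batch_iter(data, batch_size, num_epochs, shuffle=True):
    """Yield successive batches over `data` for `num_epochs` passes."""
    data = np.array(data)
    num_batches = int(np.ceil(len(data) / batch_size))
    for _ in range(num_epochs):
        idx = np.random.permutation(len(data)) if shuffle else np.arange(len(data))
        for b in range(num_batches):
            yield data[idx[b * batch_size:(b + 1) * batch_size]]

# Example: batches of up to 4 items, one epoch, original order preserved.
for batch in batch_iter(list(range(10)), 4, 1, shuffle=False):
    print(batch)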