Example #1
def predict(args):
    device = torch.device(args.device)
    cls_embed = torch.load(args.embedding_save_path)
    model = textCNN(args.embedding_size, args.cls_num, args.l1_channels_num)
    model.load_state_dict(torch.load(args.model_save_path))
    model.eval()
    model = model.to(device)
    cls_embed = cls_embed.to(device)
    cls_embed.eval()
    dev_dataset = DataSet(args.predict_data, None, args.batch_size)
    dev_dataset.reorderForEval()
    towrite = open(args.predict_writeto, "w+")
    towrite.write("idx,labels\n")
    idx = 0
    print("Begin Predict task...")
    while True:
        (example, dump), p = dev_dataset.getPredictBatch(False)
        # Embed into a new variable instead of clobbering `example`;
        # example[1] (assumed to hold the per-sample ids written below)
        # must still refer to the raw batch when it is zipped with outs.
        embedded = cls_embed(example, device=device)
        outs = model(embedded)
        outs = (torch.argmax(outs, -1) + 1).squeeze().tolist()
        for out in zip(example[1], outs):
            towrite.write("{0},{1}\n".format(out[0], int(out[1])))
            idx += 1
        if p:
            break
    towrite.close()
    print("Predict task Done!")
Example #2
def predict(args):
    device = torch.device(args.device)
    cls_embed = torch.load(args.embedding_save_path)
    model = textCNN(args.embedding_size, args.cls_num, args.l1_channels_num)
    model.load_state_dict(torch.load(args.model_save_path))
    model.eval()
    model = model.to(device)
    cls_embed = cls_embed.to(device)
    cls_embed.eval()
    dev_dataset = DataSet(args.predict_data, None, args.batch_size)
    towrite = open(args.predict_writeto, "w+")
    towrite.write("idx,labels\n")
    idx = 0
    print("Begin Predict task...")
    while True:
        example, p = dev_dataset.getPredictBatch()
        example = cls_embed(example, device=device)
        #    print(example.size())
        out = model(example)
        out = torch.argmax(out, -1).item() + 1
        towrite.write("{0},{1}\n".format(idx, out))
        idx += 1
        if p:
            break
    towrite.close()
    print("Predict task Done!")
Example #3
def read_data_sets(validation_size=0.1):
    test_images = np.array([el['img'] for el in test_data])
    test_labels = np.array([el['label'] for el in test_data])
    test_labels = np.eye(meta['n_classes'])[test_labels]
    train_images = np.array([el['img'] for el in train_data])
    train_labels = np.array([el['label'] for el in train_data])
    train_labels = np.eye(meta['n_classes'])[train_labels]

    if 0 <= validation_size < 1.0:
        validation_size = int(validation_size * len(train_images))

    if not 0 <= validation_size <= len(train_images):
        raise ValueError(
            'Validation size should be between 0 and {}. Received: {}.'
            .format(len(train_images), validation_size))
    # Shuffle data
    perm = np.arange(len(train_labels))
    np.random.shuffle(perm)
    train_images = train_images[perm]
    train_labels = train_labels[perm]

    # Split training set in training and validation set
    validation_images = train_images[:validation_size]
    validation_labels = train_labels[:validation_size]
    train_images = train_images[validation_size:]
    train_labels = train_labels[validation_size:]

    train = DataSet(train_images, train_labels, dtype=np.float32)
    validation = DataSet(validation_images,
                         validation_labels,
                         dtype=np.float32)
    test = DataSet(test_images, test_labels, dtype=np.float32)

    return base.Datasets(train=train, validation=validation, test=test)
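The np.eye(n_classes)[labels] idiom above is a compact one-hot encoding: row i of the identity matrix is the one-hot vector for class i. A standalone check:

import numpy as np

labels = np.array([0, 2, 1])
print(np.eye(3)[labels])
# [[1. 0. 0.]
#  [0. 0. 1.]
#  [0. 1. 0.]]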
Example #4
def main():
    imgs = tf.placeholder(tf.float32, [None, IMAGE_WIDTH * IMAGE_HEIGHT])
    keys = tf.placeholder(tf.float32, [None, N_CLASS])

    train_model = create_CNN(imgs, Weight_Dicts, Biases_Dict, Dropout_Dict)
    # Define loss and optimizer
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=train_model,
                                                labels=keys))
    optimizer = tf.train.AdamOptimizer(0.01).minimize(cost)

    # Evaluate the train model
    correct_model = tf.equal(tf.argmax(train_model, 1), tf.argmax(keys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_model, tf.float32))

    init = tf.initialize_all_variables()
    
    obj = DataSet()
    obj.load()
    with tf.Session() as sess:
        sess.run(init)
        step = 1
        while step * BATCH_SIZE < TRAINING_ITERS:
            batch_imgs, batch_keys = obj.next_batch(BATCH_SIZE)
            sess.run(optimizer, feed_dict={imgs: batch_imgs, keys: batch_keys})
            if step % DISPLAY_STEP == 0:
                acc = sess.run(accuracy, feed_dict={imgs: batch_imgs, keys: batch_keys})
                loss = sess.run(cost, feed_dict={imgs: batch_imgs, keys: batch_keys})
                print("Iter " + str(step * BATCH_SIZE) + ", MiniBatch Loss = " + "{:.6f}".format(loss) + ", Training Accuracy = " + "{:.5f}".format(acc))
            step = step + 1
Example #5
    def __init__(self, args):

        self.args = {}
        self.args['dataset'] = args.dataset_name
        self.args["batch_size"] = args.batch_size
        self.args['cuda'] = args.cuda
        self.args['device'] = args.device
        self.args['epoch'] = args.epoch

        self.args['in_length'] = args.obs_length
        self.args['out_length'] = args.pred_length
        self.args['save_name'] = args.save_name
        self.args['load_name'] = args.load_name

        # self.args['nll_only'] = True
        self.args["learning_rate"] = 1e-4
        self.args["w_decay"] = 1e-4

        self.args['name'] = 'test.tar'
        self.args["optim"] = 'Adam'
        self.args['train_loss'] = 'MSE'

        self.wandb = False
        if args.wandb:
            self.wandb = True
            print("Wandb is initialized...")
            wandb.init(project="vanilla_gru",
                       # name="obs: {}, pred: {}".format(args.obs_length, args.pred_length),
                       config=self.args)

        self.net = NNPred(args)
        if self.args['cuda']:
            self.net = self.net.cuda(self.args['device'])

        # for training// dataset
        self.train_dataset = DataSet(args, 'train')
        self.val_dataset = DataSet(args, 'val')
        self.test_dataset = DataSet(args, 'test')
        # for i in range(12300, 12500):
        # hist, hist_mask, fut, fut_mask, ref_pose, AgentInfo = self.test_dataset[0]
        # print(AgentInfo)
        # print(hist)
        # print(fut)
        # # A = fut+ref_pose#// These lines for
        # # A[:,0] = ((A[:,0]+1)/2)*(self.test_dataset.max_position_x - self.test_dataset.min_position_x)+ self.test_dataset.min_position_x
        # # A[:,1] = ((A[:,1]+1)/2)*(self.test_dataset.max_position_y - self.test_dataset.min_position_y)+ self.test_dataset.min_position_y
        # print(A)
        # # print(fut[:,1]+ref_pose[1])
        # quit()

        self.trainDataloader = DataLoader(self.train_dataset, batch_size=self.args["batch_size"], shuffle=True,
                                          num_workers=6, collate_fn=self.train_dataset.GetBatch, drop_last=True)
        # print("trainDataloader completed!")
        self.valDataloader = DataLoader(self.val_dataset, batch_size=self.args["batch_size"], shuffle=True,
                                        num_workers=6, collate_fn=self.val_dataset.GetBatch, drop_last=True)
        # print("valDataloader completed!")
        self.testDataloader = DataLoader(self.test_dataset, batch_size=self.args["batch_size"], shuffle=True,
                                         num_workers=6, collate_fn=self.test_dataset.GetBatch, drop_last=True)
Example #6
    def analyze_result(self, paths):
        image_paths = sorted(glob(os.path.join(paths, '*')))
        image, wides = DataSet().get_imges(image_paths)

        def getlab(x):
            result = x.split('_')[-1].replace('.jpg', '')
            result = result.replace('.png', '')
            return result

        labels = list(map(getlab, image_paths))  # materialize for the dict(zip(...)) below

        inputs = tf.placeholder(tf.float32, [None, 32, None, 1])
        width = tf.placeholder(tf.int32, [None])
        is_training = tf.placeholder(tf.bool)
        logits, sequence_length = self.crnn(inputs, width, is_training)

        decoder, probably = tf.nn.ctc_greedy_decoder(logits, sequence_length, merge_repeated=True)
        decoder = decoder[0]

        dense_decoder = tf.sparse_to_dense(sparse_indices=decoder.indices, output_shape=decoder.dense_shape,
                                           sparse_values=decoder.values, default_value=-1)
        with tf.Session() as sess:
            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, config.MODEL_SAVE)

            result = []
            decode = dict(zip(config.ONE_HOT.values(), config.ONE_HOT.keys()))

            if image.shape[0] <= config.BATCH_SIZE:
                sentence = sess.run(dense_decoder, feed_dict={inputs: image, width: wides, is_training: False})
                result.extend(sentence.tolist())
            else:
                index = 0
                while index * config.BATCH_SIZE < image.shape[0]:
                    # Clamp the final slice so the tail batch is not dropped.
                    end = min((index + 1) * config.BATCH_SIZE, image.shape[0])

                    sentence = sess.run(dense_decoder, feed_dict={inputs: image[index * config.BATCH_SIZE:end, ...],
                                                                  width: wides[index * config.BATCH_SIZE:end, ...],
                                                                  is_training: False})

                    result.extend(sentence.tolist())
                    index = index + 1

            result = list(map(lambda y: ''.join(list(map(lambda x: decode.get(x), y))), result))

            result = dict(zip(labels, result))

        print(result)
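The clamped batching pattern used in the loop above, in isolation (the helper name is ours, not part of the snippet):

def batch_ranges(n, batch_size):
    # Yield (start, end) index pairs covering n items in batch_size chunks;
    # the last pair is clamped so the tail is never dropped.
    index = 0
    while index * batch_size < n:
        yield index * batch_size, min((index + 1) * batch_size, n)
        index += 1

print(list(batch_ranges(10, 4)))  # [(0, 4), (4, 8), (8, 10)]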
Example #7
def run_sc_train(config):
    """Load problem."""
    if not os.path.exists(config.probfn):
        raise ValueError("Problem file not found.")
    else:
        p = problem.load_problem(config.probfn)
    """Set up model."""
    model = setup_model(config, A=p.A)

    # Create the target directory if it doesn't exist
    dirName = config.modelfn
    if not os.path.exists(dirName):
        os.mkdir(dirName)
        print("Directory ", dirName, " Created ")
    else:
        print("Directory ", dirName, " already exists")

    # model.weights[2].assign(2 * model.weights[2].numpy())
    model.weights[2].assign((1234.0, 567.0))
    model.weights[3].assign((1234.0, 567.0))

    model.save_weights(config.modelfn + '/ckpt')
    print('model weights ... ')
    print(model.weights[2])
    print(model.weights[3])

    loaded_model = setup_model(config, A=p.A)
    loaded_model.load_weights(config.modelfn + '/ckpt')

    print('loaded model weights ... ')
    print(loaded_model.weights[2])
    print(loaded_model.weights[3])
    # loaded_model.load_weights('ckpt')

    # model.load_weights('my_model.h5', by_name=False, skip_mismatch=False)
    """Set up input."""
    config.SNR = np.inf if config.SNR == 'inf' else float(config.SNR)

    data_set = DataSet.DataSet(config, p)
    """Set up training."""
    stages = train.setup_sc_training(model, data_set, None, config.init_lr,
                                     config.decay_rate, config.lr_decay)

    tfconfig = tf.compat.v1.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True

    # start timer
    start = time.time()

    # train model
    model.do_training(stages, data_set, config.modelfn, config.scope,
                      config.val_step, config.maxit, config.better_wait)

    # end timer
    end = time.time()
    elapsed = end - start
    print("elapsed time of training = " + str(timedelta(seconds=elapsed)))
Example #8
def readDataSetFile(filename, F):
	with open(filename) as f:
		E = DataSet()
		for line in f:
			line = line.split()
			if len(line) != len(F):
				print("Number of attributes in the training record: " + str(line) + " does not match the number in the attribute file")
				raise Exception('DataValidation')
			d = Data()
			idx = 0
			# Validate each value against the attribute file before adding it.
			for value in line:
				if value in F[idx].values:
					d.addValue(F[idx].name, value)
				else:
					print('Value=' + value + ' is not a valid value for attribute=' + F[idx].name)
				idx += 1
			E.addData(d)
	return E
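A hedged usage sketch for readDataSetFile. It assumes each attribute descriptor in F exposes .name and .values, exactly as the function accesses them; the Feature class and the file name here are hypothetical:

class Feature:
    # Hypothetical descriptor matching the F[idx].name / F[idx].values accesses.
    def __init__(self, name, values):
        self.name = name
        self.values = values

F = [Feature("outlook", {"sunny", "overcast", "rain"}),
     Feature("play", {"yes", "no"})]
E = readDataSetFile("train.data", F)  # returns the populated DataSet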
Example #9
def train(config):
    model = SelfAttNet(config)
    model.train()
    dataset = DataSet(config)

    for epoch in range(config.num_epochs):
        for data in dataset:
            train_x, train_y = data
            train_x = Variable(convert_words(train_x))
            true_y = Variable(train_y)
            emb_x, P = model(train_x)
Example #10
def main():
    imgs = tf.placeholder(tf.float32, [None, IMAGE_WIDTH * IMAGE_HEIGHT])
    keys = tf.placeholder(tf.float32, [None, N_CLASS])

    train_model = create_CNN(imgs, Weight_Dicts, Biases_Dict, Dropout_Dict)
    # Define loss and optimizer
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=train_model,
                                                labels=keys))
    optimizer = tf.train.AdamOptimizer(0.01).minimize(cost)

    # Evaluate the train model
    correct_model = tf.equal(tf.argmax(train_model, 1), tf.argmax(keys, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_model, tf.float32))

    init = tf.initialize_all_variables()

    obj = DataSet()
    obj.load()
    with tf.Session() as sess:
        sess.run(init)
        step = 1
        while step * BATCH_SIZE < TRAINING_ITERS:
            batch_imgs, batch_keys = obj.next_batch(BATCH_SIZE)
            sess.run(optimizer, feed_dict={imgs: batch_imgs, keys: batch_keys})
            if step % DISPLAY_STEP == 0:
                acc = sess.run(accuracy,
                               feed_dict={
                                   imgs: batch_imgs,
                                   keys: batch_keys
                               })
                loss = sess.run(cost,
                                feed_dict={
                                    imgs: batch_imgs,
                                    keys: batch_keys
                                })
                print "Iter " + str(
                    step *
                    BATCH_SIZE) + ", MiniBatch Loss = " + "{:.6f}".format(
                        loss) + ", Training Accuracy = " + "{:.5f}".format(acc)
            step = step + 1
Example #11
def main(*args):
	# Hyper parameters
	learning_rate = 0.001
	training_steps = 10000
	valid_step = 50
	cell_size = 256
	num_rnn_layers = 1

	dataset = DataSet(FLAGS.dataset)
	model = Model(dataset.samples_shape[1],
				  dataset.labels_shape[1],
				  dataset.labels_shape[2],
				  cell_size,
				  num_rnn_layers,
				  learning_rate,
				  cell_type='lstm')

	with tf.Session() as sess:
		tf.global_variables_initializer().run()
		loss = []
		for step in range(training_steps):
			train_samples, train_labels, train_weights = dataset.get_batch(FLAGS.batch_size, 'train')
			train_labels_T = np.transpose(train_labels, (1, 0, 2))
			_loss, prediction = model.step(train_samples, train_labels_T, train_weights, sess)
			# loss.append(_loss)
			if (step % valid_step) == 0:
				# print("Average training loss: %s" % np.mean(loss))
				# loss = []
				valid_samples, valid_labels, valid_weights = dataset.get_batch(FLAGS.batch_size, 'valid')
				valid_labels_T = np.transpose(valid_labels, (1, 0, 2))
				v_loss, v_prediction = model.step(valid_samples, valid_labels_T, valid_weights, sess, valid=True)
				print("Valid loss @ step %s: %s" % (step,v_loss))
				for p in v_prediction:
					pred = decode_ohe(p)
					cleaned_pred = clean_prediction(pred)
					print(cleaned_pred)
Example #12
    def output(self, path):
        image, wides = DataSet().get_imges([path])
        inputs = tf.placeholder(tf.float32, [None, 32, None, 1])
        width = tf.placeholder(tf.int32, [None])
        is_training = tf.placeholder(tf.bool)
        logits, sequence_length = self.crnn(inputs, width, is_training)

        decoder_greedy, probably_greedy = tf.nn.ctc_greedy_decoder(
            logits, sequence_length, merge_repeated=True)
        with tf.device('/cpu:0'):
            decoders, probably = tf.nn.ctc_beam_search_decoder(
                logits,
                sequence_length,
                beam_width=20,
                top_paths=5,
                merge_repeated=False)

        decoder_list = []

        for decoder in decoders:
            dense_decoder = tf.sparse_to_dense(
                sparse_indices=decoder.indices,
                output_shape=decoder.dense_shape,
                sparse_values=decoder.values,
                default_value=-1)
            decoder_list.append(dense_decoder)

        # classs = tf.argmax(logits,-1)

        # classs = tf.squeeze(classs,axis=-1)

        logits = tf.nn.softmax(logits)

        # classs = tf.nn.top_k(logits,10)

        with tf.Session() as sess:

            saver = tf.train.Saver(tf.global_variables())
            saver.restore(sess, config.MODEL_SAVE)

            t1 = time.time()
            decoder_list, logits_, probably_ = sess.run(
                [decoder_list, logits, probably],
                feed_dict={
                    inputs: image,
                    width: wides,
                    is_training: False
                })
            t2 = time.time()
            print(t2 - t1)

            t3 = time.time()
            _, _ = sess.run([decoder_greedy, logits],
                            feed_dict={
                                inputs: image,
                                width: wides,
                                is_training: False
                            })
            t4 = time.time()
            print(t4 - t3)

            t5 = time.time()
            logits_ = sess.run(logits,
                               feed_dict={
                                   inputs: image,
                                   width: wides,
                                   is_training: False
                               })
            logits_ = logits_[:, 0, :]
            logits_ = logits_[:, config.NUM_SIGN]
            decoder_cpu = beam_search_decoder(logits_, 5)
            t6 = time.time()
            print(t6 - t5)
            for result in decoder_cpu:

                def get_char(num):
                    return config.DECODE[num]

                result_0 = map(get_char, result[0])
                print(''.join(result_0))
                print(result[1])
            print(
                '------------------------------------------------------------------------------------------------------------'
            )

            decode = dict(zip(config.ONE_HOT.values(), config.ONE_HOT.keys()))

            result_list = []

            for sentence in decoder_list:
                sentence = sentence.tolist()
                result = ''.join(
                    list(map(lambda x: decode.get(x), sentence[0])))
                result_list.append(result)

        for i, result in enumerate(result_list):
            print(result)
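beam_search_decoder is an external helper here; judging from the call site, it takes per-step probability rows and returns (index_sequence, score) pairs. A minimal log-probability beam search with that interface might look like this sketch (an assumption, not the snippet's actual implementation):

import math

def beam_search_decoder(probs, k):
    # probs: iterable of per-timestep probability rows (e.g. softmax outputs);
    # returns the k best (index_sequence, negative_log_prob) pairs, best first.
    beams = [([], 0.0)]
    for row in probs:
        candidates = [(seq + [i], score - math.log(p + 1e-12))
                      for seq, score in beams
                      for i, p in enumerate(row)]
        beams = sorted(candidates, key=lambda t: t[1])[:k]
    return beams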
Example #13
def train_vectors(options, sens=None):
    # check vectors and ckpt
    checkpoint = '0'
    train_vec_dir = os.path.split(options.vectors_path)[0]
    ckpt_dir = os.path.join(train_vec_dir, 'ckpt')
    ckpt = tf.train.get_checkpoint_state(ckpt_dir)
    if ckpt and ckpt.model_checkpoint_path:
        cur_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
        logger.info(
            "model and vectors already exists, checkpoint step = {}".format(
                cur_step))
        checkpoint = input(
            "enter 0 to start a new training run, or a chosen ckpt step to restore (-1 for the latest ckpt)"
        )
    if checkpoint == '0':
        if ckpt:
            tf.gfile.DeleteRecursively(ckpt_dir)
        logger.info('starting a new embedding training run using tensorflow ...')
    elif checkpoint == '-1':
        logger.info(
            'restoring an embedding training run using tensorflow from the latest ckpt...')
    else:
        logger.info(
            'restoring an embedding training run using tensorflow from ckpt-%s...' %
            checkpoint)
    if not os.path.exists(train_vec_dir):
        os.makedirs(train_vec_dir)
    if not os.path.exists(ckpt_dir):
        os.makedirs(ckpt_dir)

    # generate training examples
    logger.info("Generate training examples:")
    idx_vocab_freq_file = os.path.join(train_vec_dir, 'vocab.freq')
    logger.info('\t corpus_store_path = {}'.format(options.corpus_store_path))
    logger.info(
        '\t vocab and frequencies file = {}'.format(idx_vocab_freq_file))
    logger.info('\t walk_workers to load dataset = {}'.format(
        options.walk_workers))
    logger.info('\t window_size = {}'.format(options.window_size))
    data, labels, idx2vocab, nodes_frequencies = generate_train_data(
        options.corpus_store_path,
        options.headflag_of_index_file,
        options.walk_workers,
        options.window_size,
        idx_vocab_freq_file,
        sens=sens,
        always_rebuild=options.always_rebuild)
    del sens
    gc.collect()
    dataset = DataSet(data=data,
                      labels=labels,
                      shuffled=not options.unshuffled)

    lr_file = os.path.join(train_vec_dir, "lr.info")
    np.savetxt(
        lr_file,
        np.asarray(
            [options.learning_rate, options.decay_epochs, options.decay_rate],
            dtype=np.float32),
        fmt="%.6f")

    # train info
    logger.info('Train info:')
    logger.info('\t total embedding nodes = {}'.format(len(idx2vocab)))
    logger.info('\t total training examples = {}'.format(len(data)))
    logger.info('\t shuffled in training = {}'.format(not options.unshuffled))
    logger.info('\t embedding size = {}'.format(options.embedding_size))
    logger.info('\t window size = {}'.format(options.window_size))
    logger.info('\t negative = {}'.format(options.negative))
    logger.info('\t distortion_power = {}\n'.format(options.distortion_power))
    logger.info('\t batch_size = {}'.format(options.batch_size))
    logger.info('\t iter_epoches = {}'.format(options.iter_epoches))
    logger.info('\t init_learning_rate = {}'.format(options.learning_rate))
    logger.info('\t decay_epochs = {}'.format(options.decay_epochs))
    logger.info('\t decay_interval = {}'.format(options.decay_interval))
    logger.info('\t decay_rate = {}'.format(options.decay_rate))
    logger.info('\t loss_interval = {}s'.format(options.loss_interval))
    logger.info('\t summary_steps = {}'.format(options.summary_steps))
    logger.info('\t summary_interval = {}s'.format(options.summary_interval))
    logger.info('\t ckpt_epochs = {}'.format(options.ckpt_epochs))
    logger.info('\t ckpt_interval = {}s\n'.format(options.ckpt_interval))
    logger.info('\t using_gpu = {}'.format(options.using_gpu))
    logger.info('\t visible_device_list = {}'.format(
        options.visible_device_list))
    logger.info('\t log_device_placement = {}'.format(
        options.log_device_placement))
    logger.info('\t allow_soft_placement = {}'.format(
        options.allow_soft_placement))
    logger.info('\t gpu_memory_fraction = {}'.format(
        options.gpu_memory_fraction))
    logger.info('\t gpu_memory_allow_growth = {}'.format(options.allow_growth))
    logger.info('\t train_workers = {}\n'.format(options.train_workers))

    logger.info('\t ckpt_dir = {}'.format(ckpt_dir))
    logger.info('\t vectors_path = {}'.format(options.vectors_path))
    logger.info('\t learning_rate_path = {}'.format(lr_file))

    fr_vec = open(os.path.join(train_vec_dir, 'embedding.info'), 'w')
    fr_vec.write('embedding info:\n')
    fr_vec.write('\t corpus_store_path = {}\n'.format(
        options.corpus_store_path))
    fr_vec.write(
        '\t vocab and frequencies file = {}\n'.format(idx_vocab_freq_file))
    fr_vec.write('\t total embedding nodes = {}\n'.format(len(idx2vocab)))
    fr_vec.write('\t total training examples = {}\n'.format(len(data)))
    fr_vec.write(
        '\t shuffled in training = {}\n'.format(not options.unshuffled))
    fr_vec.write('\t embedding size = {}\n'.format(options.embedding_size))
    fr_vec.write('\t window size = {}\n'.format(options.window_size))
    fr_vec.write('\t negative = {}\n'.format(options.negative))
    fr_vec.write('\t distortion_power = {}\n\n'.format(
        options.distortion_power))
    fr_vec.write('\t batch_size = {}\n'.format(options.batch_size))
    fr_vec.write('\t iter_epoches = {}\n'.format(options.iter_epoches))
    fr_vec.write('\t init_learning_rate = {}\n'.format(options.learning_rate))
    fr_vec.write('\t decay_epochs = {}\n'.format(options.decay_epochs))
    fr_vec.write('\t decay_interval = {}\n'.format(options.decay_interval))
    fr_vec.write('\t decay_rate = {}\n'.format(options.decay_rate))
    fr_vec.write('\t loss_interval = {}s\n'.format(options.loss_interval))
    fr_vec.write('\t summary_steps = {}\n'.format(options.summary_steps))
    fr_vec.write('\t summary_interval = {}s\n'.format(
        options.summary_interval))
    fr_vec.write('\t ckpt_epochs = {}\n'.format(options.ckpt_epochs))
    fr_vec.write('\t ckpt_interval = {}s\n\n'.format(options.ckpt_interval))
    fr_vec.write('\t using_gpu = {}\n'.format(options.using_gpu))
    fr_vec.write('\t visible_device_list = {}\n'.format(
        options.visible_device_list))
    fr_vec.write('\t log_device_placement = {}\n'.format(
        options.log_device_placement))
    fr_vec.write('\t allow_soft_placement = {}\n'.format(
        options.allow_soft_placement))
    fr_vec.write('\t gpu_memory_fraction = {}\n'.format(
        options.gpu_memory_fraction))
    fr_vec.write('\t gpu_memory_allow_growth = {}\n'.format(
        options.allow_growth))
    fr_vec.write('\t train_workers = {}\n\n'.format(options.train_workers))

    fr_vec.write('\t ckpt_dir = {}\n'.format(ckpt_dir))
    fr_vec.write('\t vectors_path = {}\n'.format(options.vectors_path))
    fr_vec.write('\t learning_rate_path = {}\n'.format(lr_file))

    fr_vec.close()

    visible_devices = ','.join(str(dev) for dev in options.visible_device_list)
    os.environ['CUDA_VISIBLE_DEVICES'] = visible_devices

    # train
    logger.info('training...')
    time_start = time.time()
    train(dataset=dataset,
          vectors_path=options.vectors_path,
          lr_file=lr_file,
          ckpt_dir=ckpt_dir,
          checkpoint=checkpoint,
          idx2vocab=idx2vocab,
          vocab_unigrams=nodes_frequencies,
          embedding_size=options.embedding_size,
          neg_sampled=options.negative,
          distortion_power=options.distortion_power,
          batch_size=options.batch_size,
          initial_learning_rate=options.learning_rate,
          decay_epochs=options.decay_epochs,
          decay_rate=options.decay_rate,
          iter_epochs=options.iter_epoches,
          allow_soft_placement=options.allow_soft_placement,
          log_device_placement=options.log_device_placement,
          gpu_memory_fraction=options.gpu_memory_fraction,
          using_gpu=options.using_gpu,
          allow_growth=options.allow_growth,
          loss_interval=options.loss_interval,
          summary_steps=options.summary_steps,
          ckpt_interval=options.ckpt_interval,
          ckpt_epochs=options.ckpt_epochs,
          summary_interval=options.summary_interval,
          decay_interval=options.decay_interval,
          train_workers=options.train_workers)
    logger.info('train completed in {}s'.format(time.time() - time_start))
    return
Example #14
lambda_f = 0.04
k_of_knn = 10
m_of_knn = 50
SEED = 666
np.random.seed(SEED)

data_dict = sio.loadmat('MNIST')
# normalization
samples = data_dict['fea'] / 256
labels = dense_to_one_hot(data_dict['gnd'], num_classes)

# labeled data
# l_index = np.random.randint(5000, size=labeled_data_size)
l_index = equal_proportion_sampling(data_dict['gnd'][:5000], num_classes,
                                    labeled_data_size)
l_dataset = DataSet(samples[l_index], labels[l_index])

# unlabeled data
# u_index = np.arange(5000, 60000)
u_index = np.random.randint(5000, 60000, size=2000)
uindex_dataset = DataSet(u_index, labels[u_index])

sample_data = np.vstack((samples[l_index], samples[u_index]))
sample_label = np.vstack((labels[l_index], labels[u_index]))

# test data
test_dataset = DataSet(samples[60000:], labels[60000:])

neighborIndex, remoteIndex, RBF_matrix = find_neighbors(
    samples,
    l_index,
Example #15
def run_sc_test(config):
    """
    Test model.
    """
    """Load problem."""
    if not os.path.exists(config.probfn):
        raise ValueError("Problem file not found.")
    else:
        p = problem.load_problem(config.probfn)
    """Load testing data."""
    xt = np.load(config.xtest)
    """Set up input for testing."""
    config.SNR = np.inf if config.SNR == 'inf' else float(config.SNR)

    data_set = DataSet.DataSet(config, p)

    input_, label_ = (train.setup_input_sc(config.test, p, xt.shape[1], None,
                                           False, config.supp_prob, config.SNR,
                                           config.magdist, **config.distargs))
    """Set up model."""
    model = setup_model(config, A=p.A)
    xhs_ = model.inference(input_, None)
    """Create session and initialize the graph."""
    tfconfig = tf.compat.v1.ConfigProto(allow_soft_placement=True)
    tfconfig.gpu_options.allow_growth = True

    with tf.compat.v1.Session(config=tfconfig) as sess:
        # graph initialization
        sess.run(tf.compat.v1.global_variables_initializer())
        # load model
        model.load_trainable_variables(config.modelfn)

        nmse_denom = np.sum(np.square(xt))
        supp_gt = xt != 0

        lnmse = []
        lspar = []
        lsperr = []
        lflspo = []
        lflsne = []

        # test model
        for xh_ in xhs_:
            xh = sess.run(xh_, feed_dict={label_: xt})

            # nmse:
            loss = np.sum(np.square(xh - xt))
            nmse_dB = 10.0 * np.log10(loss / nmse_denom)
            print(nmse_dB)
            lnmse.append(nmse_dB)

            supp = xh != 0.0
            # intermediate sparsity
            spar = np.sum(supp, axis=0)
            lspar.append(spar)

            # support error
            sperr = np.logical_xor(supp, supp_gt)
            lsperr.append(np.sum(sperr, axis=0))

            # false positive
            flspo = np.logical_and(supp, np.logical_not(supp_gt))
            lflspo.append(np.sum(flspo, axis=0))

            # false negative
            flsne = np.logical_and(supp_gt, np.logical_not(supp))
            lflsne.append(np.sum(flsne, axis=0))

    res = dict(nmse=np.asarray(lnmse),
               spar=np.asarray(lspar),
               sperr=np.asarray(lsperr),
               flspo=np.asarray(lflspo),
               flsne=np.asarray(lflsne))

    np.savez(config.resfn, **res)
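The per-iteration metric above is NMSE in dB, 10 * log10(||xh - xt||^2 / ||xt||^2). A quick numeric check of the formula with made-up vectors:

import numpy as np

xt = np.array([1.0, 2.0, 3.0])
xh = np.array([1.1, 1.9, 3.2])
nmse_dB = 10.0 * np.log10(np.sum(np.square(xh - xt)) / np.sum(np.square(xt)))
print(round(nmse_dB, 2))  # -23.68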
Example #16
def train(args):
    device = torch.device(args.device)
    cls_embed = ModelEmbeddings.load_from_file(args.pretrain_vector)
    train_data = DataSet(args.train_data, args.train_labels, args.batch_size,
                         int(1e7))
    train_data.reorderForEval()
    #  train_data.padtoMaxLen()
    print('loading dev set...')
    #  devSet,devlabels,devNum=DataSet.load_devSet(args.dev_data,args.dev_labels)
    dev_dataset = DataSet(args.dev_data, args.dev_labels, args.batch_size,
                          int(1e7))
    dev_dataset.reorderForEval()
    print('Done dev loading.')
    model = textCNN(args.embedding_size, args.cls_num, args.l1_channels_num)
    model.train()
    model = model.to(device)
    cls_embed.train()
    cls_embed = cls_embed.to(device)
    optimizer = torch.optim.Adam([{
        'params': model.parameters()
    }, {
        'params': cls_embed.parameters(),
        'lr': 1.75 * args.lr
    }],
                                 lr=float(args.lr))
    Loss_fun = torch.nn.CrossEntropyLoss(reduction='sum')

    print('begin Maximum Likelihood training')
    epoch = 0
    sum_loss = 0
    step = 0
    tot_step = 0
    pi_2 = np.pi / 2
    while True:
        optimizer.zero_grad()
        if args.use_cos_batch and epoch:
            for param_group in optimizer.param_groups:
                param_group['lr'] = args.lr * np.cos(pi_2 * (step / tot_step))

        example, p = train_data.getTestBatch()
        t = example[0]
        l = example[1]
        example = None
        l = torch.tensor(l, device=device)
        #    print("Doing word embed")
        #    print(type(t))
        #    print(type(l))
        #    print(l.requires_grad)

        t = cls_embed(t, device=device)
        #    t=t.detach()
        #    print("Done word embed")
        #    print("feed into model...")

        outPuts = model(t)
        #    print("Get answer!")
        #    print("Caluating Loss")
        #    print(outPuts.size())
        #    print(l.size())
        #    print(l)
        #    print(outPuts)
        loss = Loss_fun(outPuts, l)
        loss.backward()
        optimizer.step()
        loss = loss.item() / len(outPuts)  # take a detached scalar so sum_loss does not hold the graph
        sum_loss += loss
        print("epoch:{0},step:{1},train loss:{2}".format(epoch, step, loss))
        step += 1
        #    print("Doing backPro")
        #    print("Done backPro")
        #    print("Setping...")

        #    print("Done Step!")
        #    print('Current Batch Loss:{0}'.format(loss))
        if p:
            tot_step = step
            epoch += p
            print("Epoch mean Loss:{0}".format(sum_loss / step))
            step = 0
            sum_loss = 0
            accuracy, F1 = test(args, model, dev_dataset, cls_embed,
                                args.cls_num, device)
            if model.lastScore < F1:
                print("F1 score grew from {0} to {1}, saving model...".format(
                    model.lastScore, F1))
                model.lastScore = F1
                torch.save(model.state_dict(), args.model_save_path)
                torch.save(cls_embed, args.embedding_save_path)
                args.lr = args.lr * np.exp(args.lr_decay)
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.lr
            else:
                args.lr = args.lr / 2
                for param_group in optimizer.param_groups:
                    param_group['lr'] = args.lr
        if epoch == args.max_epoch:
            break
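The use_cos_batch branch above scales the base learning rate by cos(pi/2 * step / tot_step), decaying it from args.lr toward zero across each epoch. The schedule in isolation:

import numpy as np

lr, tot_step = 1e-3, 100
for step in (0, 50, 99):
    print(step, lr * np.cos((np.pi / 2) * (step / tot_step)))
# 0  1.000e-03
# 50 7.071e-04
# 99 1.571e-05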
Example #17
def main():
	global use_gpu,device
	# set up some parameters
	batch_size=1
	lr= 1e-3
	logging_path= 'logging/'
	num_epoches= 500
	epoch_to_save= 10
	tsize=10

	data_path= '../dataset'
	print('loading data sets ...')
	dataset_train= DataSet(datapath= data_path)
	loader_train= DataLoader(dataset= dataset_train, num_workers=8, batch_size=batch_size, shuffle=True)

	print("# of training samples: %d\n" %int(len(dataset_train)))

	model= RadarNet(use_gpu=use_gpu,device=device)
	print(model)

	criterion= ComLoss()

	# model.load_state_dict(torch.load('../logging/newest-5_8.pth'))

	if use_gpu:
		model= model.to(device)
		criterion.to(device)

	#optimizer
	optimizer= torch.optim.Adam(model.parameters(), lr=lr)
	scheduler= MultiStepLR(optimizer, milestones=[20,40,60,80], gamma=0.2)

	#record
	writer= SummaryWriter(logging_path)

	#start training
	step= 0
	for epoch in range(num_epoches):
		scheduler.step(epoch)

		for param_group in optimizer.param_groups:
			print('learning rate %f' %param_group['lr'])

		for i, (input_train, target_train) in enumerate(loader_train, 0):
			# input size: (4,10,1,200,200)
			# target size: (4,10,1,200,200)
			model.train()
			model.zero_grad()
			optimizer.zero_grad()

			input_train, target_train= Variable(input_train), Variable(target_train)
			if use_gpu:
				input_train, target_train= input_train.to(device), target_train.to(device)

			out_train= model(input_train)
			loss= -criterion(target_train, out_train)

			loss.backward()
			optimizer.step()

			# training track
			model.eval()
			out_train= model(input_train)
			output_train= torch.clamp(out_train, 0, 1)
			print("[epoch %d/%d][%d/%d]  obj: %.4f "%(epoch+1,num_epoches, i+1,len(loader_train),-loss.item()/tsize))

			if step % 10 == 0:
				writer.add_scalar('loss', loss.item(), step)

			step+=1
			# print('memory allocated: ', torch.cuda.memory_allocated(device=device))
			# print('max memory allocated: ',torch.cuda.max_memory_allocated(device=device))
		#save model
		# if epoch % epoch_to_save==0:
		# 	torch.save(model.state_dict(), os.path.join(logging_path,'net_epoch%d.pth'%(epoch+1)))

	torch.save(model.state_dict(), os.path.join(logging_path,'newest.pth'))
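MultiStepLR as configured above multiplies the learning rate by gamma=0.2 each time a milestone epoch is passed, so lr(epoch) = 1e-3 * 0.2**k with k milestones reached. Computed by hand:

lr, gamma, milestones = 1e-3, 0.2, [20, 40, 60, 80]
for epoch in (0, 20, 40, 60, 80):
    print(epoch, lr * gamma ** sum(epoch >= m for m in milestones))
# 0 0.001, 20 0.0002, 40 4e-05, 60 8e-06, 80 1.6e-06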
Example #18
def train(args):
    device = torch.device(args.device)
    cls_embed = ModelEmbeddings.load_from_file(args.pretrain_vector)
    train_data = DataSet(args.train_data, args.train_labels, args.batch_size)
    train_data.padtoMaxLen()
    print('loading dev set...')
    devSet, devlabels, devNum = DataSet.load_devSet(args.dev_data,
                                                    args.dev_labels)
    print('Done dev loading.')
    model = textCNN(args.embedding_size, args.cls_num, args.l1_channels_num)
    model.train()
    model = model.to(device)
    cls_embed = cls_embed.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=float(args.lr))
    Loss_fun = torch.nn.CrossEntropyLoss()
    dev_dataset = DataSet(args.dev_data, args.dev_labels, args.batch_size)

    print('begin Maximum Likelihood training')
    epoch = 0
    sum_loss = 0
    step = 0
    while True:
        optimizer.zero_grad()
        t, l, p = train_data.getTrainBatch()
        l = torch.tensor(l, device=device)
        #    print("Doing word embed")
        #    print(type(t))
        #    print(type(l))
        #    print(l.requires_grad)

        t = cls_embed(t, device=device)
        #    t=t.detach()
        #    print("Done word embed")
        #    print("feed into model...")

        outPuts = model(t)
        #    print("Get answer!")
        #    print("Caluating Loss")
        #    print(outPuts.size())
        #    print(l.size())
        #    print(l)
        #    print(outPuts)
        loss = Loss_fun(outPuts, l)
        sum_loss += loss.item()  # accumulate a detached scalar, not the graph
        print("epoch:{0},step:{1},train loss:{2}".format(epoch, step, loss))
        step += 1
        #    print("Doing backPro")
        loss.backward()
        #    print("Done backPro")
        #    print("Setping...")
        optimizer.step()
        #    print("Done Step!")
        #    print('Current Batch Loss:{0}'.format(loss))
        if p:
            epoch += p
            print("Epoch mean Loss:{0}".format(sum_loss / step))
            step = 0
            sum_loss = 0
            accuracy, F1 = test(args, model, dev_dataset, cls_embed,
                                args.cls_num, device)
            if model.lastScore < F1:
                model.lastScore = F1
                torch.save(model.state_dict(), args.model_save_path)
                torch.save(cls_embed, args.embedding_save_path)
        if epoch == args.max_epoch:
            break
Example #19
def train_bpnet():
    path = "mnist"
    # dataset preparation
    X_train, y_train = load_mnist(path, "train")
    X_test, y_test = load_mnist(path, "t10k")
    # dataset definition
    train_data = DataSet(X_train, y_train)
    test_data = DataSet(X_test, y_test)
    # parameter definitions
    in_size = 784
    hid_size = 100
    out_size = 10
    batch_size = 20
    epoches = 150
    # model definition
    net = BpNet(in_size, hid_size, out_size)
    print(train_data.input_data.shape, train_data.target_data.shape)
    # determine inputs dtype
    x = T.dmatrix("x")
    y = T.dmatrix("y")

    # definition of the layers
    learning_rate = 0.1

    prediction = net.forward(x)
    # cost function definition
    cost = cross_entropy_cost(prediction, y)
    # update the grad
    list_q = net.update_grad(cost, learning_rate)

    # apply gradient descent
    train = theano.function(inputs=[x, y], outputs=[cost], updates=list_q)
    # prediction
    predict = theano.function(inputs=[x], outputs=prediction)
    # training model
    loss_list = []
    percentage_list = []
    for k in range(epoches):
        Length = len(X_train) // batch_size
        sum_loss = 0
        for j in range(Length):
            out_x = X_train[j * batch_size:(j + 1) * batch_size, :]
            out_y = y_train[j * batch_size:(j + 1) * batch_size, :]
            err = train(out_x, out_y)
            sum_loss += err[0]
        out_pre = predict(test_data.input_data)
        out_org = test_data.target_data
        percentage = test_data_op(out_pre, out_org)
        print("epoches:%d loss:%0.4f correct:%0.2f%%" %
              (k, sum_loss / Length, percentage * 100))
        loss_list.append(sum_loss / Length)
        percentage_list.append(percentage)
    # ----------------------------------------------------------------
    # save the model
    model_name = 'bpnet.pkt'
    path_save = 'bpnet'
    if not os.path.exists(path_save):
        os.mkdir(path_save)
    f = open(os.path.join(path_save, model_name), 'wb')
    pickle.dump(net, f, protocol=pickle.HIGHEST_PROTOCOL)
    f.close()
    # ----------------------------------------------------------------
    # save the loss image
    x = np.linspace(0, len(loss_list), len(loss_list))
    plt.plot(x, loss_list)
    plt.savefig(os.path.join(path_save, "loss.png"))
    plt.show()
    with open(os.path.join(path_save, "loss.txt"), "w") as fp:
        for k in range(len(loss_list)):
            fp.write(str(loss_list[k]) + "\n")
    x = np.linspace(0, len(percentage_list), len(percentage_list))
    plt.plot(x, percentage_list)
    plt.savefig(os.path.join(path_save, "percentage.png"))
    plt.show()
    with open(os.path.join(path_save, "percentage.txt"), "w") as fp:
        for k in range(len(percentage_list)):
            fp.write(str(percentage_list[k]) + "\n")
Example #20
import torch.nn.functional as F
import torch.optim as optim
from datetime import datetime
from tensorboardX import SummaryWriter
from utils import DataSet, validate, show_confMat  # custom helpers
from net import Net  # import the model

# define hyperparameters
EPOCH = 5
BATCH_SIZE = 24
classes_name = [str(c) for c in range(16)]

# -------------------- load data ---------------------
# Path to the Indian Pines .mat files (each file is a separate class)
path = os.path.join(os.getcwd(), "patch")
training_dataset = DataSet(path=path, train=True)
testing_dataset = DataSet(path=path, train=False)
# Data Loaders
train_loader = torch.utils.data.DataLoader(
    dataset=training_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True
)

test_loader = torch.utils.data.DataLoader(
    dataset=testing_dataset,
    batch_size=BATCH_SIZE,
    shuffle=True
)

# check whether CUDA is available
Example #21
def main():
    dataset = DataSet(TRAIN, DEV, [TEST])
Example #22
def do_training_step(model, iteration, task_iterations, batch_size,
                     train_images, train_labels):
    from time import time
    from utils import DataSet

    iteration *= task_iterations
    start = time()

    train = DataSet(train_images, train_labels)

    rate, factor = model['rate_factor']
    learning_rate = rate / factor
    neurons = model['neuron_number']

    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    # First Convolutional layer
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])

    x_image = tf.reshape(x, [-1, 28, 28, 1])

    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    # Second Convolutional layer
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])

    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    # Densely Connected layer
    W_fc1 = weight_variable([7 * 7 * 64, neurons])
    b_fc1 = bias_variable([neurons])

    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # Dropout
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Readout
    W_fc2 = weight_variable([neurons, 10])
    b_fc2 = bias_variable([10])

    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

    cross_entropy = tf.reduce_mean(
        -tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()  # defaults to saving all variables

    sess = tf.Session()

    if iteration == 0:
        init_op = tf.initialize_all_variables()
        sess.run(init_op)
    else:
        try:
            saver.restore(sess, model['path'])
            import os
            os.remove(model['path'])
            os.remove("%s.meta" % model['path'])
            print("Model %s correctly loaded i:%s" %
                  (model['path'], iteration))
        except Exception as e:
            print("No checkpoint for model %s found in iter %s\n%s" %
                  (model['path'], iteration, e))
            sys.exit(1)

    if not model:
        model = dict()

    t1 = time()
    import numpy as np
    step_times = np.array([])
    for i in range(iteration, iteration + task_iterations):
        ls = time()
        batch_offset = i * batch_size
        batch = train.get_batch(batch_offset, batch_size)

        train_step.run(session=sess,
                       feed_dict={
                           x: batch[0],
                           y_: batch[1],
                           keep_prob: 0.5
                       })
        # print("Iteration %s - %s s " % (i, time()-ls))
        step_times = np.append(step_times, (time() - ls))
    print("Loop\n Avg: %s\n Max: %s\n Min: %s" %
          (step_times.mean(), step_times.max(), step_times.min()))
    print("Loop time %s" % (time() - t1))
    save_path = saver.save(sess, "%s_%s" % (model['base_path'], iteration))

    model['path'] = save_path

    training_accuracy = accuracy.eval(session=sess,
                                      feed_dict={
                                          x: batch[0],
                                          y_: batch[1],
                                          keep_prob: 1.0
                                      })
    model['train_accuracy'] = training_accuracy

    end = time()
    print(
        "Training stats:\n - Number of neurons: %s\n - Learning rate: %s\n - Model: %s\n - Time: %s\n" %
        (neurons, learning_rate, save_path, (end - start)))

    return model
Example #23
    return result


def read_image(image, name):
    result = detect(image)
    draw_result(image, result)
    plt.imshow(image)
    plt.savefig(os.getcwd() + '/' + name + 'output.png')


if settings.output == 1:
    image = cv2.imread(settings.picture_name)
    read_image(image, settings.picture_name[-10:])

if settings.output == 2:
    labels = DataSet('test').load_labels()
    for i in range(len(labels)):
        print(labels[i]['imname'])
        image = cv2.imread(labels[i]['imname'])
        read_image(image, labels[i]['imname'][-10:])

if settings.output == 3:
    cap = cv2.VideoCapture(-1)
    ret, _ = cap.read()
    while ret:
        ret, frame = cap.read()
        result = detect(frame)
        draw_result(frame, result)
        cv2.imshow('Camera', frame)
        cv2.waitKey(wait)
Example #24
def read_data(test_rate=0.1, one_hot=True):
    """read .dat data"""
    # read data
    true_inf = []
    fake_inf = []
    X = []
    i = 1
    filee = open('/home/summit/DDI/leftGFSK_DOWN.dat', 'rb')
    try:
        while True:
            if i != 65:
                # Each record is 16 single bytes; convert each to its integer value.
                ks = [ord(filee.read(1)) for _ in range(16)]
                if ks[0] + ks[1] == 0:
                    break
                X.append(joint_col(*ks))
                i = i + 1
            else:
                data = np.array(X)
                data = data.reshape(data.shape[0], data.shape[1], 1) / 3.
                X = []
                true_inf.append(data)
                i = 1
    finally:
        filee.close()
        true_inf = np.stack(true_inf, axis=0)

    X = []
    i = 1
    filee = open('/home/summit/DDI/leftGFSK_UP.dat', 'rb')
    try:
        while True:
            if i != 65:
                # Same 16-byte record format as above.
                ks = [ord(filee.read(1)) for _ in range(16)]
                if ks[0] + ks[1] == 0:
                    break
                X.append(joint_col(*ks))
                i = i + 1
            else:
                data = np.array(X)
                data = data.reshape(data.shape[0], data.shape[1], 1) / 3.
                X = []
                fake_inf.append(data)
                i = 1
    finally:
        filee.close()
        fake_inf = np.stack(fake_inf, axis=0)

    # split into train and test sets
    test_split_true_inf = np.random.rand(true_inf.shape[0]) < test_rate
    test_split_fake_inf = np.random.rand(fake_inf.shape[0]) < test_rate

    test_true_inf = true_inf[test_split_true_inf, :, :, :]
    test_label_true_inf = np.ones([test_true_inf.shape[0]])
    train_true_inf = true_inf[np.logical_not(test_split_true_inf), :, :, :]
    train_label_true_inf = np.ones([train_true_inf.shape[0]])

    test_fake_inf = fake_inf[test_split_fake_inf, :, :, :]
    test_label_fake_inf = np.ones([test_fake_inf.shape[0]]) * 2
    train_fake_inf = fake_inf[np.logical_not(test_split_fake_inf), :, :, :]
    train_label_fake_inf = np.ones([train_fake_inf.shape[0]]) * 2

    test_infs = np.concatenate((test_true_inf, test_fake_inf), axis=0)
    train_infs = np.concatenate((train_true_inf, train_fake_inf), axis=0)

    test_labels = np.concatenate((test_label_true_inf, test_label_fake_inf),
                                 axis=0)
    train_labels = np.concatenate((train_label_true_inf, train_label_fake_inf),
                                  axis=0)

    if one_hot:
        test_labels = test_labels.reshape(test_labels.shape[0], 1)
        train_labels = train_labels.reshape(train_labels.shape[0], 1)
        enc = OneHotEncoder()
        enc.fit(test_labels)
        test_labels = enc.transform(test_labels).toarray()
        train_labels = enc.transform(train_labels).toarray()
    #print("test_labels")
    #print(test_labels)
    #print("test_pics")
    #print(test_pics)
    #print("train_labels")
    #print(train_labels)
    #print("train_pics")
    #print(train_pics)

    data = utils.get_data(train=DataSet(images=train_infs,
                                        labels=train_labels),
                          test=DataSet(images=test_infs, labels=test_labels))
    print("type(data) line156")
    print(type(data))
    return data
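The OneHotEncoder step above maps the 1/2 class labels onto two-column one-hot rows; the same transform in isolation:

import numpy as np
from sklearn.preprocessing import OneHotEncoder

labels = np.array([[1], [2], [1]])
enc = OneHotEncoder()
print(enc.fit_transform(labels).toarray())
# [[1. 0.]
#  [0. 1.]
#  [1. 0.]]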
Example #25
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), './'))
import settings
import models
from utils import DataSet
import time
import tensorflow as tf
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np

sess = tf.InteractiveSession()
model = models.Model()
utils = DataSet('train')
saver = tf.train.Saver(
    var_list=tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='yolo'))
sess.run(tf.global_variables_initializer())

try:
    saver.restore(sess, os.getcwd() + '/model.ckpt')
    print('load from past checkpoint')
except Exception as e:
    print(e)
    try:
        print('load yolo small')
        saver.restore(sess, os.getcwd() + '/YOLO_small.ckpt')
        print('loaded from YOLO small pretrained')
    except Exception as e:
        print(e)
        print('exiting: at least a pretrained model is needed')
Example #26
def test(config):
    model = SelfAttNet(config)
    model.eval()
    dataset = DataSet(config)
Example #27
def do_testing(model, test_images, test_labels):
    from time import time
    from utils import DataSet

    start = time()

    rate, factor = model['rate_factor']
    learning_rate = rate / factor
    neurons = model['neuron_number']
    test = DataSet(test_images, test_labels)

    x = tf.placeholder(tf.float32, shape=[None, 784])
    y_ = tf.placeholder(tf.float32, shape=[None, 10])

    # First Convolutional layer
    W_conv1 = weight_variable([5, 5, 1, 32])
    b_conv1 = bias_variable([32])

    x_image = tf.reshape(x, [-1, 28, 28, 1])

    h_conv1 = tf.nn.relu(conv2d(x_image, W_conv1) + b_conv1)
    h_pool1 = max_pool_2x2(h_conv1)

    # Second Convolutional layer
    W_conv2 = weight_variable([5, 5, 32, 64])
    b_conv2 = bias_variable([64])

    h_conv2 = tf.nn.relu(conv2d(h_pool1, W_conv2) + b_conv2)
    h_pool2 = max_pool_2x2(h_conv2)

    # Densely Connected layer
    W_fc1 = weight_variable([7 * 7 * 64, neurons])
    b_fc1 = bias_variable([neurons])

    h_pool2_flat = tf.reshape(h_pool2, [-1, 7 * 7 * 64])
    h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat, W_fc1) + b_fc1)

    # Dropout
    keep_prob = tf.placeholder(tf.float32)
    h_fc1_drop = tf.nn.dropout(h_fc1, keep_prob)

    # Readout
    W_fc2 = weight_variable([neurons, 10])
    b_fc2 = bias_variable([10])

    y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop, W_fc2) + b_fc2)

    cross_entropy = tf.reduce_mean(
        -tf.reduce_sum(y_ * tf.log(y_conv), reduction_indices=[1]))
    train_step = tf.train.AdamOptimizer(
        learning_rate=learning_rate).minimize(cross_entropy)
    correct_prediction = tf.equal(tf.argmax(y_conv, 1), tf.argmax(y_, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    saver = tf.train.Saver()  # defaults to saving all variables

    sess = tf.Session()

    try:
        saver.restore(sess, model['path'])
        print("Model %s correctly loaded" % model['path'])
    except Exception as e:
        print("No checkpoint found\n%s" % e)
        sys.exit(1)

    test_accuracy = accuracy.eval(session=sess,
                                  feed_dict={
                                      x: test.images,
                                      y_: test.labels,
                                      keep_prob: 1.0
                                  })
    model['previous_test'] = model['test_accuracy']
    model['test_accuracy'] = test_accuracy

    end = time()
    print(
        "Testing stats:\n - Number of neurons: %s\n - Learning rate: %s\n - Test accuracy: %s\n - Model: %s\n - Time: %s\n" %
        (neurons, learning_rate, test_accuracy, model['path'], (end - start)))

    return model
Example #28
        loss = loss / length
        print(index, loss)
        loss_svm.append(loss)
    loss_svm = np.array(loss_svm)
    x_list = np.linspace(0, len(loss_svm), len(loss_svm))
    plt.plot(x_list, loss_svm)
    plt.show()
    f = open('RbmSvm.pt', 'wb')
    pickle.dump(net, f, protocol=pickle.HIGHEST_PROTOCOL)
    f.close()


if __name__ == '__main__':
    file_path = "/home/asus/py3env/project/project/data/winequality-red.csv"
    dataset = DataSet(file_path,
                      train=True,
                      transform=standard_vector,
                      target_transform=standard_vector)
    dataloader = DataLoader(dataset, 10)
    params = {
        "input_size": 10,
        "hidden_size": [100, 500],
        "output_size": 1,
        "learning_rate": 0.015,
        "training_epoches": 120,
        "pretrain_times": 7
    }
    # train_BpNet(dataloader,params)
    # train_SvmMlp(dataloader,params)
    # train_RbmSvm(dataloader,params)
    trainDBNMlp(dataloader, params)