Example #1
def main(args):
    checkArgs()
    if FLAGS.testing:
        model.test(FLAGS)
    elif FLAGS.finetune:
        model.training(FLAGS, is_finetune=True)
    else:
        model.training(FLAGS, is_finetune=False)
Example #2
	def post(self):
		try:
			data = json.loads(self.request.body)
			result = model.test(data['name'], data['X'])
			self.write({'success': True, 'prediction': result})
		except Exception as ex:
			self.write({'success': False, 'error': repr(ex)})
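
A minimal client sketch for a handler like the one above, assuming it is served by Tornado at a hypothetical /predict route on localhost:8888 (route, port, and payload values are all assumptions):

# Hypothetical client for the handler above; URL and payload are made up.
import json
import requests

payload = {'name': 'mnist', 'X': [[0.1, 0.2, 0.3]]}
resp = requests.post('http://localhost:8888/predict', data=json.dumps(payload))
print(resp.json())  # {'success': True, 'prediction': ...} on success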
Example #3
def main(train=False):
    run = True
    clock = pygame.time.Clock()
    started = False
    player_win = False
    which_player = 0
    replay = False

    def draw(win):

        win.fill(BLACK)
        if started:
            player2.draw(win)
            player1.draw(win)

            player1.move()
            player2.move()

            ball.draw(win)
            ball.move()

            win.blit(*render_text('PLAYER 1', WIDTH // 4, 22, FONT_BIG))
            win.blit(*render_text('PLAYER 2', WIDTH -
                                  (WIDTH // 4), 22, FONT_BIG))

            win.blit(*render_text('{}'.format(player1.score), WIDTH //
                                  4, 62, FONT_SML))
            win.blit(*render_text('{}'.format(player2.score), WIDTH -
                                  (WIDTH // 4), 62, FONT_SML))

            # draw the dividing line down the centre
            for i in range(0, HEIGHT, 10):
                pygame.draw.rect(win, WHITE, [(WIDTH // 2) - 1, i, 2, 7])

        else:
            win.blit(*render_text('AI PING PONG GAME', WIDTH // 2, HEIGHT //
                                  3, FONT_XXBIG))
            win.blit(*render_text('PRESS SPACE OR RETURN TO START THE GAME',
                                  WIDTH // 2, HEIGHT -
                                  (HEIGHT // 3), FONT_BIG))

        if player_win:
            ball.move_val_x = 0
            ball.move_val_y = 0
            ball.x, ball.y = CENTER

            win.blit(*render_text('GAME OVER', WIDTH // 2, HEIGHT //
                                  5, FONT_XBIG))
            win.blit(
                *render_text('PLAYER {} WINS'.format(which_player), WIDTH //
                             2, HEIGHT // 3, FONT_XBIG))

            win.blit(*render_text('PRESS SPACE OR RETURN TO START THE GAME',
                                  WIDTH // 2, HEIGHT -
                                  (HEIGHT // 3), FONT_BIG))

        pygame.display.update()

    while run:

        keys = pygame.key.get_pressed()

        clock.tick(FPS)
        draw(WIN)

        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                run = False
                pygame.quit()
                return

        if keys[pygame.K_SPACE] or keys[pygame.K_RETURN]:
            started = True

        if player1.score == 6 or player2.score == 6:
            if keys[pygame.K_SPACE] or keys[pygame.K_RETURN]:
                replay = True
                player_win = False
                which_player = 0
                player1.score = 0
                player2.score = 0

                ball.move_val_x = BALL_SPEED
                ball.move_val_y = BALL_SPEED

        if started:

            # record the game state for training (data, like the other
            # globals used here, is defined at module level outside this snippet)
            data['ball'].append([ball.y, ball.x])

            data['player1_y'].append(player1.y)
            data['player2_y'].append(player2.y)

            # check for paddle collisions
            if ball.y in range(
                    player1.y, player1.y + PLAYER_HEIGHT
            ) and ball.x <= PLAYER_WIDTH + PADD_VAL + BALL_RADIUS:
                ball.move_val_x *= -1

            if ball.y in range(
                    player2.y, player2.y + PLAYER_HEIGHT
            ) and ball.x >= WIDTH - (PLAYER_WIDTH + PADD_VAL + BALL_RADIUS):
                ball.move_val_x *= -1

            # check for the score change
            if ball.x < 0:
                player2.score += 1
                ball.x, ball.y = CENTER

            if ball.x > WIDTH:
                player1.score += 1
                ball.x, ball.y = CENTER

            # check for a winner
            if player1.score >= 6:
                player_win = True
                which_player = 1

            elif player2.score >= 6:
                player_win = True
                which_player = 2
            if train:
                # left player
                # player1.y = test([ball.y], './models/[1612961767]-model-best.pkl')
                if keys[pygame.K_w]:
                    player1.y -= SPEED
                if keys[pygame.K_s]:
                    player1.y += SPEED

                # right player
                if keys[pygame.K_UP]:
                    player2.y -= SPEED
                if keys[pygame.K_DOWN]:
                    player2.y += SPEED

            else:
                # ai player1
                player1.y = test([ball.y, ball.x], trained_model)

                # ai player2
                # player2.y = test([ball.y, ball.x], trained_model)
                if keys[pygame.K_UP]:
                    player2.y -= SPEED
                if keys[pygame.K_DOWN]:
                    player2.y += SPEED

    pygame.display.update()
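
The loop above leans on a render_text helper that is not shown. A plausible minimal version, inferred from the call sites (the exact signature is an assumption), returns a surface plus a rect centred at (x, y) so it can be splatted straight into win.blit(*...); it reuses the snippet's WHITE constant:

# Hypothetical helper matching the calls above: render_text(text, x, y, font).
def render_text(text, x, y, font, color=WHITE):
    surface = font.render(text, True, color)  # pygame.font.Font.render
    rect = surface.get_rect(center=(x, y))    # centre the text at (x, y)
    return surface, rect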
Example #4
import argparse

from config import *
import data
import model

def defineArgs():
    """define args"""
    parser = argparse.ArgumentParser(description="Chinese_poem_generator.")
    parser.add_argument("-m", "--mode", help="select mode: 'train', 'test', or 'head'",
                        choices=["train", "test", "head"], default="test")
    return parser.parse_args()

if __name__ == "__main__":
    args = defineArgs()
    trainData = data.POEMS(trainPoems)
    model = model.MODEL(trainData)
    if args.mode == "train":
        model.train()
    elif args.mode == "test":
        poems = model.test()
    else:
        characters = input("please input Chinese characters: ")
        poems = model.testHead(characters)
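
For reference, parse_args also accepts an explicit argument list, which makes the mode flag above easy to sanity-check in isolation:

import argparse

parser = argparse.ArgumentParser(description="Chinese_poem_generator.")
parser.add_argument("-m", "--mode", choices=["train", "test", "head"], default="test")
print(parser.parse_args([]).mode)              # 'test' (the default)
print(parser.parse_args(["-m", "head"]).mode)  # 'head'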
Example #5
def main():
  # Read the data from the text files
  begin = time.time()
  vocab, train_raw, test_raw = read.read_tweets("../training_set_tweets.txt", "../test_set_tweets.txt")
  print "Num of Train users:", len(train_raw), "Num of Test users:", len(test_raw)
  print "Read data:", time.time() - begin

  # Preprocess the data
  begin = time.time()
  vocab, bigrams, train_word, test_word, train_char, test_char = preprocessing.preprocess(train_raw, test_raw)
  print "Preprocessed the data", time.time() - begin

  # Assign ids to words
  vocab_list = list(vocab)
  vocab_list.sort()
  begin = time.time()
  vocab_dict = {}
  for i in range(len(vocab_list)):
      vocab_dict[vocab_list[i]] = i
  print "Assigned ids to words:", time.time() - begin

  # Build train and test set
  num_full_feats = len(vocab_list) + 10
  num_train_tweets = 0
  num_test_tweets = 0
  # num_train_tweets = np.count_nonzero(~np.isnan(train))
  # num_test_tweets = np.count_nonzero(~np.isnan(test))
  for author_id in train_word:
      num_train_tweets += len(train_word[author_id])
  for author_id in test_word:
      num_test_tweets += len(test_word[author_id])
  X_train = np.zeros((num_train_tweets, num_full_feats))
  y_train = np.zeros(num_train_tweets)
  X_test = np.zeros((num_test_tweets, num_full_feats))
  y_test = np.zeros(num_test_tweets)

  count = 0

  for author_id in train_word:
      for tweet in train_word[author_id]:
          X_train[count, :] = features.get_full_feats(tweet, vocab_dict)
          y_train[count] = author_id
          count += 1
  print count

  count = 0
  for author_id in test_word:
      for tweet in test_word[author_id]:
          X_test[count, :] = features.get_full_feats(tweet, vocab_dict)
          y_test[count] = author_id
          count += 1
  print count

  begin = time.time()
  feats = feature_selection.select_features(X_train, y_train, np.zeros(num_full_feats), 100, "dia")
  X_train = X_train[:, feats]
  X_test = X_test[:, feats]
  print "Features selected:", time.time() - begin

  begin = time.time()
  clf = model.train(X_train, y_train)
  acc, my_acc, preds, scores = model.test(clf, X_test, y_test)
  print 'time:', time.time()-begin, 'acc:', acc, 'my_acc:', my_acc
  print 'preds:', preds
  print 'scores:', scores

  print (preds == y_test)[:100]
  print np.count_nonzero(scores > 0)
  print np.count_nonzero(scores < 0)
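
The nested loops above flatten a {author_id: [tweets]} dict into an (n_tweets, n_features) matrix with one row per tweet. A compact standalone sketch of the same pattern, with all names made up:

import numpy as np

def build_matrix(tweets_by_author, n_feats, featurize):
    # One row per tweet; y holds the author id for each row.
    n = sum(len(tweets) for tweets in tweets_by_author.values())
    X, y = np.zeros((n, n_feats)), np.zeros(n)
    row = 0
    for author_id, tweets in tweets_by_author.items():
        for tweet in tweets:
            X[row, :] = featurize(tweet)
            y[row] = author_id
            row += 1
    return X, y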
Example #6
def train(sess, model, hps, logdir, visualise):
    _print(hps)
    _print('Starting training. Logging to', logdir)
    _print('epoch n_processed n_images ips dtrain dtest dsample dtot train_results test_results msg')

    # Train
    sess.graph.finalize()
    n_processed = 0
    n_images = 0
    train_time = 0.0
    test_loss_best = 999999

    if hvd.rank() == 0:
        train_logger = ResultLogger(logdir + "train.txt", **hps.__dict__)
        test_logger = ResultLogger(logdir + "test.txt", **hps.__dict__)

    tcurr = time.time()
    for epoch in range(1, hps.epochs):

        t = time.time()

        train_results = []
        for it in range(hps.train_its):

            # Set learning rate, linearly annealed from 0 in the first hps.epochs_warmup epochs.
            lr = hps.lr * min(1., n_processed /
                              (hps.n_train * hps.epochs_warmup))

            # Run a training step synchronously.
            _t = time.time()
            train_results += [model.train(lr)]
            if hps.verbose and hvd.rank() == 0:
                _print(n_processed, time.time()-_t, train_results[-1])
                sys.stdout.flush()

            # Images seen wrt anchor resolution
            n_processed += hvd.size() * hps.n_batch_train
            # Actual images seen at current resolution
            n_images += hvd.size() * hps.local_batch_train

        train_results = np.mean(np.asarray(train_results), axis=0)

        dtrain = time.time() - t
        ips = (hps.train_its * hvd.size() * hps.local_batch_train) / dtrain
        train_time += dtrain

        if hvd.rank() == 0:
            train_logger.log(epoch=epoch, n_processed=n_processed, n_images=n_images, train_time=int(
                train_time), **process_results(train_results))

        if epoch < 10 or (epoch < 50 and epoch % 10 == 0) or epoch % hps.epochs_full_valid == 0:
            test_results = []
            msg = ''

            t = time.time()
            # model.polyak_swap()

            if epoch % hps.epochs_full_valid == 0:
                # Full validation run
                for it in range(hps.full_test_its):
                    test_results += [model.test()]
                test_results = np.mean(np.asarray(test_results), axis=0)

                if hvd.rank() == 0:
                    test_logger.log(epoch=epoch, n_processed=n_processed,
                                    n_images=n_images, **process_results(test_results))

                    # Save checkpoint
                    if test_results[0] < test_loss_best:
                        test_loss_best = test_results[0]
                        model.save(logdir+"model_best_loss.ckpt")
                        msg += ' *'

            dtest = time.time() - t

            # Sample
            t = time.time()
            if epoch == 1 or epoch == 10 or epoch % hps.epochs_full_sample == 0:
                visualise(epoch)
            dsample = time.time() - t

            if hvd.rank() == 0:
                dcurr = time.time() - tcurr
                tcurr = time.time()
                _print(epoch, n_processed, n_images, "{:.1f} {:.1f} {:.1f} {:.1f} {:.1f}".format(
                    ips, dtrain, dtest, dsample, dcurr), train_results, test_results, msg)

            # model.polyak_swap()

    if hvd.rank() == 0:
        _print("Finished!")
Example #7
__author__ = 'zhxia'

from redisrpc.server import RedisRpcServer
from model import test

if __name__ == '__main__':
    rpcServer = RedisRpcServer()
    rpcServer.setDelegate(test())
    rpcServer.start()
Example #8
#     plt.subplot(2,2,2)
#     plt.imshow(Y[0,:,:,0],cmap='gray')
#     plt.axis('off')
#     plt.subplot(2,2,3)
#     plt.imshow(mX[0,:,:,0],cmap='gray')
#     plt.axis('off')
#     plt.subplot(2,2,4)
#     plt.imshow(mY[0,:,:,0],cmap='gray')
#     plt.axis('off')
#     plt.show()
test_set = train_dataset.DataPipeLine(test_path,
                                      target_size=[240, 240],
                                      patch_size=[128, 128],
                                      test_flag=True)
test_set = tf.data.Dataset.from_generator(
    test_set.generator,
    output_types=(tf.float32, tf.float32, tf.float32, tf.float32, tf.int32),
    output_shapes=([240, 240], [240, 240], [240, 240], [240, 240], [2, 2])) \
    .map(map_func, num_parallel_calls=num_threads) \
    .batch(BATCH_SIZE) \
    .prefetch(buffer_size=tf.data.experimental.AUTOTUNE)

# for i,(X,Y) in enumerate(dataset):
#     print(i+1,X.shape,Y.dtype)
model = model.CycleGAN(train_set=dataset,
                       test_set=test_set,
                       loss_name="WGAN-GP-SN",
                       mixed_precision=True,
                       learning_rate=1e-4,
                       tmp_path=tmp_path,
                       out_path=out_path)
model.build(X_shape=[None, 128, 128, 1], Y_shape=[None, 128, 128, 1])
model.test()
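
from_generator wraps a plain Python generator in a tf.data pipeline; a self-contained miniature of the map/batch/prefetch chain used above:

import tensorflow as tf

def gen():
    for i in range(4):
        yield float(i), float(i) * 2.0

ds = (tf.data.Dataset.from_generator(gen,
                                     output_types=(tf.float32, tf.float32),
                                     output_shapes=([], []))
      .map(lambda x, y: (x + 1.0, y), num_parallel_calls=2)
      .batch(2)
      .prefetch(tf.data.experimental.AUTOTUNE))

for x, y in ds:
    print(x.numpy(), y.numpy())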
Example #9
"""
@author: ashima.garg
"""

import config
import data
import model

if __name__ == "__main__":
    data = data.Data()
    data.read_train(config.TRAIN_X_PATH, config.TRAIN_Y_PATH)
    data.preprocess()
    data.split()
    print("data read")
    
    model = model.Model()
    model.build()
    print("model build")
    
    model.train(data)
    print("model trained")
    
    model.test(data)
    print("model tested")
    '''
    data.read_test(config.TEST_X_PATH)
    data.preprocess()
    print("model predicted")
    '''
    
Example #10
    # X_Test = X_Test / 255.0
    print(X_Test[0][0])
    name_Test = [i[1] for i in test_data]

    try:
        net
        print("Net exists")
    except NameError:
        print("Loading NN")
        net = model.Net(len(labels)).to(model.device)
        net.load_state_dict(torch.load('mytraining.pt'))
        net.eval()

    print(net)
    print("-----default model----")
    def_pred = model.test(net, X_Test.float(), name_Test, labels)
    print("---------model1------")
    net.load_state_dict(torch.load('model1.pt'))
    net.eval()
    mod1_pred = model.test(net, X_Test.float(), name_Test, labels)
    print("------model2------")
    net.load_state_dict(torch.load('model2.pt'))
    net.eval()
    mod2_pred = model.test(net, X_Test.float(), name_Test, labels)

    results = [[x, y, z] for x, y, z in zip(def_pred, mod1_pred, mod2_pred)]
    for r, name in zip(results, name_Test):
        print(f'{name} is {np.unique(r)}')
if SHOW_GRAPH:
    gr(model.MODEL_NAME, EPOCHS, BATCH_SIZE, TRAIN_SIZE)
Example #11
    params['ModelParams']['testInterval'] = 2000  # number of training iterations between tests
    params['ModelParams']['device_ids'] = [0, 1]  # ids of the GPUs used for multi-GPU training

    # params of the DataManager
    params['DataManagerParams']['VolSize'] = np.asarray(
        [64, 64, 24], dtype=int)  # the size of the crop volume
    params['DataManagerParams']['TestStride'] = np.asarray(
        [64, 64, 24], dtype=int
    )  # the stride between adjacent crops in the testing and validation phases

    # True: produce the probability map in the testing phase; False: produce the label image
    params['TestParams']['ProbabilityMap'] = False

    model = model.Model(params)
    train = [i for i, j in enumerate(sys.argv) if j == '-train']
    if len(train) > 0:
        model.train()  # train the model

    test = [i for i, j in enumerate(sys.argv) if j == '-test']
    snapnumber = None
    for i in sys.argv:
        if i.isdigit():
            snapnumber = i
            break
    if len(test) > 0:
        model.test(
            snapnumber
        )  # test the model; snapnumber is the snapshot number parsed from argv
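
An argparse equivalent of the manual sys.argv scanning above (the flag spellings are kept; everything else is a sketch):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('-train', action='store_true')
parser.add_argument('-test', action='store_true')
parser.add_argument('snapnumber', nargs='?', default=None)

args = parser.parse_args(['-test', '3000'])
print(args.train, args.test, args.snapnumber)  # False True 3000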
Example #12
import sys
import pandas as pd
# from preprocess import ppSketch, ppPhoto
from model import save, load, train, test, Model

action = sys.argv[1]
actions = {
    "save": lambda: save(sys.argv[2]),
    "load": lambda: load(sys.argv[2]),
    "train": lambda: train(pd.read_csv('X_train.csv', header=None, skiprows=1),\
                            pd.read_csv('y_train.csv', header=None, skiprows=1)),
    "test": lambda: test(pd.read_csv(sys.argv[2])),
}
actions[action]()
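
The actions dict above is a small dispatch table mapping a subcommand name to a callable. A standalone miniature with a safer lookup for unknown commands:

import sys

def do_train():
    print("training...")

def do_test():
    print("testing...")

actions = {"train": do_train, "test": do_test}

cmd = sys.argv[1] if len(sys.argv) > 1 else "test"
action = actions.get(cmd)
if action is None:
    sys.exit("unknown command: %s (expected one of %s)" % (cmd, sorted(actions)))
action()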
Example #13
        print('time used to get image: ' + str(end_time - start_time))
    else:
        print('if input just one image, it should be in predict mode')

print('get all image data')
start_time = time.time()
X, valid_list = data.get_image(file_list, resize_size)
end_time = time.time()
print('time used to extract image data: ' + str(end_time - start_time))
# print(X)

if (mode_flag == 'test'):
    print('get all label data')
    y = data.get_label_data(label_file_path, valid_list)
    start_time = time.time()
    model.test(X, y, resize_size, pca_components)
    end_time = time.time()
    print('finish')
    print('time used for test: ' + str(end_time - start_time))
elif (mode_flag == 'train'):
    print('get all label data')
    y = data.get_label_data(label_file_path, valid_list)
    print('start training')
    start_time = time.time()
    model.train(X, y, resize_size, pca_components)
    end_time = time.time()
    print('finish train, model in clf and pca')
    print('time used for train: ' + str(end_time - start_time))
elif (mode_flag == 'test_predict'):
    print('get all label data')
    y = data.get_label_data(label_file_path, valid_list)
Example #14
def train(sess, model, hps, logdir, visualise):
    _print(hps)
    _print('Starting training. Logging to', logdir)
    _print(
        'epoch n_processed n_images ips dtrain dtest dsample dtot train_results test_results msg'
    )

    # Train
    sess.graph.finalize()
    n_processed = 0
    n_images = 0
    train_time = 0.0
    test_loss_best = 999999

    if True:  # hvd.rank() == 0:
        train_logger = ResultLogger(logdir + "train.txt", **hps.__dict__)
        test_logger = ResultLogger(logdir + "test.txt", **hps.__dict__)

    tcurr = time.time()
    for epoch in range(1, hps.epochs):

        t = time.time()

        train_results = []
        for it in range(hps.train_its):

            # Set learning rate, linearly annealed from 0 in the first hps.epochs_warmup epochs.
            lr = hps.lr * min(1., n_processed /
                              (hps.n_train * hps.epochs_warmup))

            # Run a training step synchronously.
            _t = time.time()
            train_results += [model.train(lr)]
            if hps.verbose:
                _print(n_processed, time.time() - _t, train_results[-1])
                sys.stdout.flush()

            # Images seen wrt anchor resolution
            n_processed += hps.n_batch_train
            # Actual images seen at current resolution
            n_images += hps.local_batch_train

        train_results = np.mean(np.asarray(train_results), axis=0)

        dtrain = time.time() - t
        ips = (hps.train_its * hps.local_batch_train) / dtrain
        train_time += dtrain

        if True:  # hvd.rank() == 0:
            train_logger.log(epoch=epoch,
                             n_processed=n_processed,
                             n_images=n_images,
                             train_time=int(train_time),
                             **process_results(train_results))

        if epoch < 10 or (epoch < 50 and epoch % 10 == 0) or epoch % hps.epochs_full_valid == 0:
            test_results = []
            msg = ''

            t = time.time()
            # model.polyak_swap()

            if epoch % hps.epochs_full_valid == 0:
                # Full validation run
                for it in range(hps.full_test_its):
                    test_results += [model.test()]
                test_results = np.mean(np.asarray(test_results), axis=0)

                if True:  # hvd.rank() == 0:
                    test_logger.log(epoch=epoch,
                                    n_processed=n_processed,
                                    n_images=n_images,
                                    **process_results(test_results))
                    model.save(logdir + f"model_{epoch}.ckpt")
                    # Save checkpoint
                    if test_results[0] < test_loss_best:
                        test_loss_best = test_results[0]
                        model.save(logdir + "model_best_loss.ckpt")
                        msg += ' *'

            dtest = time.time() - t

            # Sample
            t = time.time()
            if epoch == 1 or epoch == 10 or epoch % hps.epochs_full_sample == 0:
                visualise(epoch)
            dsample = time.time() - t

            if True:  # hvd.rank() == 0:
                dcurr = time.time() - tcurr
                tcurr = time.time()
                _print(
                    epoch, n_processed, n_images,
                    "{:.1f} {:.1f} {:.1f} {:.1f} {:.1f}".format(
                        ips, dtrain, dtest, dsample,
                        dcurr), train_results, test_results, msg)

            # model.polyak_swap()

    if True:  # hvd.rank() == 0:
        _print("Finished!")
Example #15
def test(model, chars):
    print("Start Model Test...")
    model.load_weights('model/model.h5', by_name=True)
    md.test(model, chars)
    print("Finish Model Test")
Example #16
import tensorflow as tf

# Import model files
import model
from dataset import BKVOCDataset

# Parameters
gpu_id = 0
# Modify the value below to match the value of max_training_iters_3 in the training notebook!
max_training_iters = 50000

# Model paths
segnet_stream = 'weak'
ckpt_name = 'vgg_16_4chan_' + segnet_stream
ckpt_path = 'models/' + ckpt_name + '/' + ckpt_name + '.ckpt-' + str(
    max_training_iters)

# Load the Berkeley-augmented Pascal VOC 2012 segmentation dataset
dataset = BKVOCDataset(phase='test')

# Display dataset configuration
dataset.print_config()

# Test the model
with tf.Graph().as_default():
    with tf.device('/gpu:' + str(gpu_id)):
        model.test(dataset, ckpt_path, dataset.pred_masks_path,
                   dataset.img_pred_masks_path, segnet_stream)

# Combine original images with predicted instance masks
dataset.combine_images_with_predicted_masks()
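
A minimal standalone version of the graph/device scoping used above, written against tf.compat.v1 so it also runs under TF2 (an assumption; the original presumably targeted TF1 directly). Soft placement falls back to CPU when no GPU is available:

import tensorflow.compat.v1 as tf
tf.disable_eager_execution()

with tf.Graph().as_default():
    with tf.device('/gpu:0'):
        x = tf.constant([1.0, 2.0])
        y = x * 2.0
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        print(sess.run(y))  # [2. 4.]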
Example #17
if __name__ == '__main__':

    # parameters
    EPOCH = 15
    ITERATIONS = 1000
    ALPHA = 0.01

    print('---Training---')
    net = model.train(EPOCH, ITERATIONS, ALPHA)

    print('\n---Testing 1 (input 1000 to break)---')
    while True:
        num1 = int(input('\ninput 1st integer: '))
        if num1 == 1000:
            break
        num2 = int(input('input 2nd integer: '))
        ans = model.test(net, num1, num2)
        print('#####Result: %d + %d = %d#####' % (num1, num2, ans))

    print('\n---Testing 2---')
    right = 0
    total = 0
    for num1 in range(0, 128):
        for num2 in range(0, 128):
            ans = model.test(net, num1, num2)
            print('#####Result: %d + %d = %d#####' % (num1, num2, ans))
            if (ans == (num1 + num2)):
                right += 1
            total += 1
    print('Accuracy:', right / total)
Example #18
    except FileExistsError as e:
        if not os.path.isdir(args.sess_dir):
            raise e  # re-raise if the session directory could not be created
    writers = {
        phase: tf.summary.FileWriter(os.path.join(args.sess_dir, phase))
        for phase in ["train", "valid"]
    }  # create train and valid subdirectories under the directory made above
    utils.load_config(
        args.config
    )  # load_config in utils/__init__.py sets the default parameter values

    model = model.KoSR(
        args.sess_dir)  # load the KoSR class from model/__init__.py
    with tf.device(model.device):  # select the training device (GPU)
        train_batch, valid_batch, test_batch = utils.load_batches(
        )  # load_batches from utils/__init__.py

    with tf.Session(config=model.config) as sess:
        saver = tf.train.Saver(max_to_keep=1, keep_checkpoint_every_n_hours=1)
        try:
            saver.restore(sess, tf.train.latest_checkpoint(
                args.sess_dir))  # restore the latest checkpoint, if any
        except ValueError:
            log.info("==== TRAINING START ====")
            writers["train"].add_graph(sess.graph)
            model.train(sess, train_batch, valid_batch, writers["train"],
                        writers["valid"], saver)
            saver.save(sess, model.path)
        log.info("==== TESTING START ====")
        model.test(sess, test_batch)
Example #19
import tensorflow as tf
import os
from model import train, test
from configuration import get_config

config = get_config()
tf.reset_default_graph()

if __name__ == "__main__":
    # start training
    if config.train:
        print("\nTraining Session")
        os.makedirs(config.model_path)
        train(config.model_path)
    # start test
    else:
        print("\nTest session")
        if os.path.isdir(config.model_path):
            test(config.model_path)
        else:
            raise AssertionError("model path doesn't exist!")
Example #20
                                             model_name='shallow_speech')
        model.train(aishell,
                    epoch=1000000,
                    batch_size=32,
                    lr=0.1,
                    params_path=None)
        # model.train(aishell, epoch=1000000, batch_size=32, lr=0.01, params_path='data/shallow_speech-0.1-12-4.481357229064059')  # 0-12,
        # model.train(aishell, epoch=1000000, batch_size=32, lr=0.01, weight_decay=0., params_path='data/deep_speech2-0.01-4-2.2333707728450722')  # 18
        # model.train(aishell, epoch=1000000, batch_size=16, lr=0.001, weight_decay=0., params_path='data/deep_speech2-0.01-4-2.2333707728450722')  # 18
        # model.train(aishell, epoch=1000000, batch_size=32, lr=0.1 / 32, momentum=0., weight_decay=0., params_path=None)
    elif parse == 'value':
        root = '/share/datasets/data_aishell'
        aishell = datasets.Aishell(root)
        # net = deep_speech2.DeepSpeech2(201, 4231)
        net = shallow_speech.ShallowSpeech()
        model = model.SpeechRecognitionModel(net,
                                             deep_speech2.ctc_loss,
                                             device='cpu')
        model.value(
            aishell.train_datas(1, 'dev')[:100],
            params_path='data/shallow_speech-0.1-14-4.4856919112791225')
    elif parse == 'test':
        root = '/share/datasets/data_aishell'
        aishell = datasets.Aishell(root)
        net = deep_speech2.DeepSpeech2(201, 4231)
        # net = shallow_speech.ShallowSpeech()
        model = model.SpeechRecognitionModel(net, deep_speech2.ctc_loss)
        model.test(wav_path='test.wav',
                   params_path='data/deep_speech2-0.01-4-2.2494239925925266',
                   id2word=aishell.id2word)
Example #21
# -*- coding: utf-8 -*-
"""
Created on Sat Jun 27 12:54:06 2020

@author: zubair
"""

import numpy as np
import pandas as pd
import model

### load your classification dataset here
data = pd.read_csv("vertebrate.csv")

## set the attribute column indexes here
cols_x = [1, 2, 3, 4, 5, 6, 7]
x = data[data.columns[cols_x]]

## set the label column index here
cols_y = [8]
y = data[data.columns[cols_y]]

### pass your test record here, with attribute values only
test_record_1 = ['cold-blooded', 'scales', 'yes', 'yes', 'no', 'no', 'no']

clas, prob = model.test(test_record_1, x, y)

#np.argmax(prob)
print("Your test record is from Class =========> " + clas[np.argmax(prob)])
Example #22
DATA = pd.read_csv('dataset/Indian_pines.csv', header=None).values
data_D = DATA[:, :-1]
data_L = DATA[:, -1]
#%%
# print(data_D.shape)
data_D = data_D.reshape(data_D.shape[0], data_D.shape[1], 1, 1)
# print(data_D.shape)
#%%
data_set = GetLoader(data_D, data_L)
data_loader = DataLoader(data_set, batch_size=BATCH_SIZE, shuffle=False)

net = ResNet(INPUT_CHANNELS, CLASSES)
net.load_state_dict(
    torch.load('checkpionts/ResNet_run50_0.7929756097560976.pth'))

pred_labels = test(net, data_loader, DEVICE)
#%%
new_label = []
for i in range(len(pred_labels)):
    new_label.extend(pred_labels[i].tolist())
#%%
# for l in [new_label[i] for i in range(len(new_label))]:
#     print(l,end=',')
#%%
# data and label (the full hyperspectral cube and its ground-truth map) are
# loaded elsewhere in the source file
pred_matrix = np.zeros((data.shape[0], data.shape[1]))
count = 0
for i in range(data.shape[0]):
    for j in range(data.shape[1]):
        if label[i][j] != 0:
            pred_matrix[i][j] = new_label[count]
            count += 1
Example #23
import datetime
import os

import config
import data
import model

if __name__ == "__main__":
    with open(
            os.path.join(
                config.LOG_DIR,
                str(datetime.datetime.now().strftime("%Y%m%d")) + "_" +
                str(config.BATCH_SIZE) + "_" + str(config.NUM_EPOCHS) +
                ".txt"), "w") as log:
        log.write(str(datetime.datetime.now()) + "\n")
        log.write("Use Pretrained Weights: " + str(config.USE_PRETRAINED) +
                  "\n")
        log.write("Pretrained Model: " + config.PRETRAINED + "\n")
        # READ DATA
        train_data = data.DATA(config.TRAIN_DIR)
        print("Train Data Loaded")
        # BUILD MODEL
        model = model.MODEL()
        print("Model Initialized")
        model.build()
        print("Model Built")
        # TRAIN MODEL
        model.train(train_data, log)
        print("Model Trained")
        # TEST MODEL
        test_data = data.DATA(config.TEST_DIR)
        print("Test Data Loaded")
        model.test(test_data, log)
        print("Image Reconstruction Done")
Example #24
from PyQt4 import QtCore,QtGui
import sys
import viewnew
from model import test
app = QtGui.QApplication(sys.argv)
window = QtGui.QMainWindow()
ui = viewnew.Ui_MainWindow()
ui.setupUi(window)

QtCore.QObject.connect(ui.runHGMS, QtCore.SIGNAL("clicked()"), lambda: test())
QtCore.QObject.connect(ui.writeLineButton, QtCore.SIGNAL("clicked()"), lambda: writeline())


def writeline():
    print(ui.lineEdit.text())

window.show()
sys.exit(app.exec_())
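
For reference, PyQt4 also supports the new-style signal/slot syntax; these two lines are equivalent to the QtCore.QObject.connect calls above (reusing the snippet's names, with lambdas kept so writeline can still be defined later in the file):

ui.runHGMS.clicked.connect(lambda: test())
ui.writeLineButton.clicked.connect(lambda: writeline())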
Example #25
        print "\t %s : %s " % (k, v)

    print 'Debug | run_this_code'
    print 'Train params : '
    for k, v in train_params.items():
        print "\t %s : %s " % (k, v)

    print("prepare training data..")
    data_provider = utils.get_data_provider_by_name(args.dataset, train_params)

    print data_provider
    print "initialize_model..."

    print model_params
    model = model.DenseNet(data_provider=data_provider, **model_params)

    if args.train:
        print "Data provider train images :", data_provider.train.num_examples
        model.train_all_epochs(train_params)

    if args.test:
        print 'Test Mode'
        if not args.train:
            model.load_model()

        print "Data provider test images : ", data_provider.test.num_examples
        print "Testing..."
        loss, accuracy = model.test(data_provider.test, batch_size=200)
        print "mean cross_entropy: %f , mean accuracy : %f" % (loss, accuracy)
Example #26
import tensorflow as tf
import os
from model import train, test
from configuration import get_config

config = get_config()
tf.reset_default_graph()

if __name__ == "__main__":
    # start training
    if config.train:
        print("\nTraining Session")
        # os.makedirs(config.model_path)
        train(config.model_path)
    # start test
    else:
        print("\nTest session")
        if os.path.isdir(config.model_path):
            test(config.model_path)
        else:
            raise AssertionError("model path doesn't exist!")
Example #27
        "original_signal": original_signal,
        "rolled_signal": rolled_signal
    })
    return df


def preprocess(df):
    """
    Modify code here to make the signal suitable for TadGAN.
    """
    signal = df

    signal = normalize(signal)
    signal = make_rolling_data(signal)

    return signal


if __name__ == "__main__":

    df = pd.read_csv('exchange-2_cpc_results.csv')

    signal = df.value
    signal = preprocess(signal)

    model = model.TadGAN(dataset=signal)

    model.train(n_epochs=1000)

    df = model.test()