Example #1
def Data_load(num_timesteps_input, num_timesteps_output):
    A, X, means, stds, nodes = load_metr_la_data()
    # A, X, max_X, min_X = load_metr_la_data()
    # A, X, max_value, X_val = load_metr_la_data()

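    # split along the time axis: first 60% for training, next 20% for validation, last 20% for testing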
    split_line1 = int(X.shape[2] * 0.6)
    split_line2 = int(X.shape[2] * 0.8)

    train_original_data = X[:, :, :split_line1]
    val_original_data = X[:, :, split_line1:split_line2]
    # val_original_data = X_val
    test_original_data = X[:, :, split_line2:]

    training_input, training_target = generate_dataset(
        train_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    val_input, val_target = generate_dataset(
        val_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    test_input, test_target = generate_dataset(
        test_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)

    return A, means, stds, training_input, training_target, val_input, val_target, test_input, test_target, nodes
Example #2
def Data_load(num_timesteps_input, num_timesteps_output):
    A, X, means, stds, X_val = load_metr_la_data()
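    # load_metr_la_data here returns a separate validation array (X_val), so no index-based split is performed below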

    # split_line1 = int(X.shape[0] * 0.6)
    # split_line2 = int(X.shape[0] * 0.8)

    # train_original_data = X[:, :, :split_line1]
    train_original_data = X
    # val_original_data = X[:, :, split_line1:split_line2]
    val_original_data = X_val
    test_original_data = X  #X[split_line1:, :, :]

    training_input, training_target = generate_dataset(
        train_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)

    val_input, val_target = generate_dataset(
        val_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)

    test_input, test_target = generate_dataset(
        test_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)

    return A, means, stds, training_input, training_target, val_input, val_target, test_input, test_target
Example #3
def main():
    print('Welcome aboard. Fasten your seatbelts')
    #read parameters
    arguments = read_args()

    #download pre-trained model and initialize
    model, criterion, optimizer = utils.create_model(arguments.arch,
                                               arguments.hidden_units,
                                               n_classes,
                                               arguments.learning_rate,
                                               dropout_per)
    
    #gpu or cpu
    processor = utils.get_processor(arguments.gpu)

    #generate and normalize images for training and validation
    train_dataset, train_classes_ids = utils.generate_dataset('train', arguments.data_dir)
    valid_dataset, test_classes_ids = utils.generate_dataset('valid', arguments.data_dir)
 
    #train network
    model, optimizer = do_deep_learning(processor, 
                                        model, 
                                        criterion, 
                                        optimizer, 
                                        arguments.epochs, 
                                        train_dataset, 
                                        valid_dataset)

    #test network with test dataset
    test_dataset, test_classes_ids = utils.generate_dataset('test', arguments.data_dir)
    check_accuracy_on_test(test_dataset,
                           model,
                           processor)

    #save checkpoint
    save_params_dict = {'hidden_size': arguments.hidden_units,
                        'output_size': n_classes,
                        'dropout_per': dropout_per,
                        'learning_rate': arguments.learning_rate,
                        'epochs_trained': arguments.epochs,
                        'img_mapping': train_classes_ids,
                        'optimizer_state': optimizer.state_dict(),
                        'model_state': model.state_dict(),
                        'classifier': model.classifier,
                        'arch': arguments.arch,
                        'input_size': model.classifier[0].in_features}

    save_checkpoint(utils.basepath + arguments.save_dir + checkpoint_name, save_params_dict)

    print("**INFO: Training Ended!")
def main():
    X = generate_dataset(shape="blobs")
    D = pairwise_distances(X)  # euclidean distance as distance metric
    A = gaussian_kernel(D, is_sym=True)  # Gaussian distance as affinity metric

    # K-MEANS
    clusters, _ = apply_kmeans(X)
    plot_clustering_result(X,
                           A,
                           clusters,
                           clustering_name="K means clustering")

    # DBSCAN
    clusters, noise = apply_dbscan(X, D)
    plot_clustering_result(X,
                           A,
                           clusters,
                           noise,
                           clustering_name="DBSCAN clustering")

    # EIGENVECTOR BASED CLUSTERING
    A_eigen = gaussian_kernel(
        D, mult=0.05, is_sym=True)  # Gaussian distance as affinity metric
    clusters, noise = apply_eigenvector_based(X, A_eigen)
    plot_clustering_result(X,
                           A_eigen,
                           clusters,
                           noise,
                           clustering_name="Eigenvector based clustering")
Example #5
    def __init__(self, X, train_fraction=0.1, consider_explicit=False):
        self.X_train, self.X_validation, self.X_test, self.test_list = utils.generate_dataset(
            X, train_fraction)
        self.n = np.shape(X)[0]
        self.consider_explicit = consider_explicit
        # np.random.rand takes the dimensions as separate arguments, not a tuple
        # self.r (the latent dimension) is assumed to be set elsewhere in the class
        self.U = np.random.rand(self.r, self.n)
        self.V = np.random.rand(self.r, self.n)
        self.update_estimation()
Example #6
def generate_datafile_old(number_items=1000):
    """
    Create the samples.py file
    """
    from utils import get_names, generate_dataset
    from pprint import pprint
    filename = "samples.py"
    dataset = generate_dataset(number_items)
    fo = open(filename, "w")
    fo.write("#!/usr/bin/env python\n")
    fo.write("# -*- coding: utf-8 -*-\n")
    fo.write("#Brainaetic: http://www.thenetplanet.com\n\n")
    fo.write("SAMPLES = ")
    pprint(dataset, fo)
    fo.close()
    print("%s generated with %d samples" % (filename, number_items))
Example #7
def generate_datafile_old(number_items=1000):
    """
    Create the samples.py file
    """
    from utils import get_names, generate_dataset
    from pprint import pprint
    filename = "samples.py"
    dataset = generate_dataset(number_items)
    fo = open(filename, "w")
    fo.write("#!/usr/bin/env python\n")
    fo.write("# -*- coding: utf-8 -*-\n")
    fo.write("#Brainaetic: http://www.thenetplanet.com\n\n")
    fo.write("SAMPLES = ")
    pprint(dataset, fo)
    fo.close()
    print("%s generated with %d samples" % (filename, number_items))
Example #8
def plot_noise_condition(k, l, outfile="noise.pdf", n=100000, a=DEF_A):
    x, y = generate_dataset(n=n, a=a)
    filt = np.logical_or(y == k, y == l)
    vals = list()
    for x0 in x[filt]:
        etas = get_etas(x0, a=a)
        eta_kl = etas[k] / (etas[k] + etas[l])
        vals.append(np.abs(eta_kl - 1 / 2))
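    # len(etas) from the last loop iteration gives the number of classes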
    n_classes = len(etas)

    plt.figure(figsize=(4, 2))
    plt.title("class {} vs class {} (k={}, l={})".format(k, l, k, l))
    colors = cm.rainbow(np.linspace(0, 1, n_classes))
    plt.hist(vals, bins=100, density=True)
    plt.ylabel(r"$P\{ \| \eta_{k,l}(x) - 1/2 \| > t \}$")
    plt.xlabel("$t$")
    plt.grid()
    plt.savefig(outfile)
Example #9
    A, X, means, stds = load_metr_la_data()
    print(A.shape)
    split_line1 = int(len(X) * 0.6)
    split_line2 = int(len(X) * 0.8)
    #
    train_original_data = X[:split_line1]

    val_original_data = X[split_line1:split_line2]
    # val_mean, val_std = means[split_line1:split_line2], stds[split_line1:split_line2]

    test_original_data = X[split_line2:]
    # test_mean, test_std = means[split_line2:], stds[split_line2:]

    training_input, training_target, train_mean_t, train_std_t = generate_dataset(
        train_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output,
        means=means,
        stds=stds)
    val_input, val_target, val_mean_t, val_std_t = generate_dataset(
        val_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output,
        means=means,
        stds=stds)
    test_input, test_target, test_mean_t, test_std_t = generate_dataset(
        test_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output,
        means=means,
        stds=stds)
Example #10
              epochs=5,
              batch_size=2,
              validation_data=(FM, cRM),
              shuffle=False,
              callbacks=[TensorBoard(log_dir='./log'), checkpoint])

    pred_cRM = model.predict(FM)

if MODEL_CHECK2:
    # check data generative function
    people_num = 2
    sample_range = (0, 2)

    repo_path = os.path.expanduser('../../data/audio/audio_train')
    X_data, y_data = avp.generate_dataset(sample_range,
                                          repo_path,
                                          num_speaker=2,
                                          verbose=1)
    print('shape of the X data: ', X_data.shape)
    print('shape of the y data: ', y_data.shape)

    # split data to training and validation set
    X_train, X_val, y_train, y_val = train_test_split(X_data,
                                                      y_data,
                                                      test_size=0.05,
                                                      random_state=30)

    # check feeding tensor to model
    input_dim = (298, 257, 2)
    output_dim = (298, 257, 2, 2)

    # train and load model
Example #11
def load_data(args):
    # Loading embeddings matrices
    print(
        "=======================\nLoading embedding files\n======================="
    )
    embedding_dims = []
    embedding_matrices = []
    if "glove" in args.embeddings:
        emb_fn = "msrpc_{}_{}_{}_{}_{}.pickle".format(args.pp_name,
                                                      args.lower_opt, "glove",
                                                      args.emb_opt,
                                                      args.version)
        print("loading ...", emb_fn)
        [embeddings_matrix,
         unknown_words] = pickle.load(open("./data/" + emb_fn, 'rb'))
        print("Embeddings shape (pretrained GloVe): {}  unknown tokens: {}".
              format(embeddings_matrix.shape, len(unknown_words)))
        embedding_dims.append(embeddings_matrix.shape[1])
        embedding_matrices.append(embeddings_matrix)

    if "POSword2vec" in args.embeddings:
        emb_fn = "msrpc_{}_{}_{}_{}_{}.pickle".format(args.pp_name,
                                                      args.lower_opt,
                                                      "POSword2vec",
                                                      args.emb_opt,
                                                      args.version)
        print("loading ...", emb_fn)
        [embeddings_matrix,
         unknown_words] = pickle.load(open("./data/" + emb_fn, 'rb'))
        print(
            "Embeddings shape (pretrained POS word2vec): {}  unknown tokens: {}"
            .format(embeddings_matrix.shape, len(unknown_words)))
        embedding_dims.append(embeddings_matrix.shape[1])
        embedding_matrices.append(embeddings_matrix)

    if "paragram25" in args.embeddings:
        emb_fn = "msrpc_{}_{}_{}_{}_{}.pickle".format(args.pp_name,
                                                      args.lower_opt,
                                                      "paragram25",
                                                      args.emb_opt,
                                                      args.version)
        print("loading ...", emb_fn)
        [embeddings_matrix,
         unknown_words] = pickle.load(open("./data/" + emb_fn, 'rb'))
        print("Embeddings shape (pretrained PARAGRAM): {}  unknown tokens: {}".
              format(embeddings_matrix.shape, len(unknown_words)))
        embedding_dims.append(embeddings_matrix.shape[1])
        embedding_matrices.append(embeddings_matrix)

    print("Final embeddings dim:", sum(embedding_dims))
    print("\n")

    # Generating datasets from parsed MSRPC
    print("===================\nGenerating datasets\n===================")
    (index_to_word, word_to_index, X_train1, X_train2, Y_train, X_test1,
     X_test2,
     Y_test) = utils.generate_dataset(args.pp_name,
                                      args.lower_opt,
                                      args.version,
                                      max_seq_length=-1,
                                      reverse_train_pairs=args.reversed_train,
                                      padding=True,
                                      autoneg=args.autoneg)

    max_seq_length = X_train1.shape[1]
    print("Max seq length:", max_seq_length)
    print("X_train:", X_train1.shape)
    print("Y_train:", Y_train.shape)
    print("X_test:", X_test1.shape)
    print("Y_test:", Y_test.shape)
    print("\n")
    return (word_to_index, max_seq_length, X_train1, X_train2, Y_train,
            X_test1, X_test2, Y_test, embedding_dims, embedding_matrices)
Example #12
    # Path to vocabs for indexing
    params["word_emb_vocab"] = os.path.join(params["embedding_path"],
                                            "vocab.txt")
    params["char_vocab"] = os.path.join(params["embedding_path"], "chars.txt")
    cond = os.path.isfile(params["word_emb_vocab"]) and os.path.isfile(
        params["char_vocab"])
    if not cond:
        train_path = glob(os.path.join(params['filepath'], '*train*'))[0]
        create_full_vocab(train_path, params['embedding_path'])

    # Create dataset files if not already done:
    params['data_path'] = os.path.join(params['log_dir'], 'dataset_lm')
    if not os.path.isdir(params['data_path']):
        generate_dataset(filedir=params['filepath'],
                         mode='lm',
                         logdir=params['data_path'],
                         n_grams=params['add_n_grams_deps'])

    # Get nb_chars, nb_labels, & nb_words for params (used in model):
    with open(params["word_emb_vocab"], "r", encoding="utf-8") as f:
        lines = f.readlines()
        params["vocab_size"] = len(lines)
        params["max_len_sent"] = max(map(len, lines))
    with open(params["char_vocab"], "r", encoding="utf-8") as f:
        params["nb_chars"] = len(f.readlines())

    params['frequencies'] = genfromtxt(os.path.join(params['embedding_path'],
                                                    'freq.txt'),
                                       delimiter="\t")
    params['activation_finish'] = tf.nn.leaky_relu
    # Create input functions
Example #13
    train_pairs = readdata.read_snli(args.train)
    logger.info('Reading validation data')
    valid_pairs = readdata.read_snli(args.validation)
    logger.info('Reading word embeddings')
    word_dict, embeddings = readdata.load_embeddings(args.embeddings,
                                                     args.vocab)
    readdata.write_extra_embeddings(embeddings, args.save)
    embeddings = utils.normalize_embeddings(embeddings)
    logger.debug(
        'Embeddings have shape {} (including unknown, padding and null)'.
        format(embeddings.shape))

    logger.info('Converting words to indices')
    max_size1, max_size2 = utils.get_max_sentence_sizes(
        train_pairs, valid_pairs)
    train_data = utils.generate_dataset(train_pairs, word_dict, max_size1,
                                        max_size2)
    valid_data = utils.generate_dataset(valid_pairs, word_dict, max_size1,
                                        max_size2)

    # count the NULL token (it is important when there's no alignment for a given word)
    max_size1 += 1
    max_size2 += 1

    msg = '{} sentences have shape {} (firsts) and {} (seconds)'
    logger.debug(
        msg.format('Training', train_data.sentences1.shape,
                   train_data.sentences2.shape))
    logger.debug(
        msg.format('Validation', valid_data.sentences1.shape,
                   valid_data.sentences2.shape))
Example #14
    A_wave = get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave)
    # A_wave = torch.memory_format(padding)
    A_wave = A_wave.to(device=args.device)

    split_line1 = int(len(X) * 0.6)
    split_line2 = int(len(X) * 0.8)

    val_original_data = X[split_line1:split_line2]
    val_mean, val_std = means[split_line1:split_line2], stds[
        split_line1:split_line2]

    val_input, val_target, val_mean_t, val_std_t = generate_dataset(
        val_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output,
        means=val_mean,
        stds=val_std)
    test_original_data = X[split_line2:]
    test_mean, test_std = means[split_line2:], stds[split_line2:]

    test_input, test_target, test_mean_t, test_std_t = generate_dataset(
        test_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output,
        means=test_mean,
        stds=test_std)

    batch_size = 100
    num_batches = val_target.shape[0] // batch_size
    for batch_idx in range(0, num_batches + 1):
Example #15
# word2vec, glove, paragram
emb_name = "word2vec"
# Duplicate training by switching pairs
reverse_train = False
# Randomly generate negative samples
autoneg = 0
#######################
# END DATA PARAMETERS #
#######################

# Generating dataset from parsed MSRPC
(index_to_word, word_to_index, X_train1, X_train2, Y_train, X_test1, X_test2,
 Y_test) = utils.generate_dataset(pp_name,
                                  lower_opt,
                                  version,
                                  max_seq_length=30,
                                  reverse_train_pairs=reverse_train,
                                  padding=True,
                                  autoneg=autoneg)
max_seq_length = X_train1.shape[1]
print("Max seq length:", max_seq_length)
print("X_train:", X_train1.shape)
print("Y_train:", Y_train.shape)
print("X_test:", X_test1.shape)
print("Y_test:", Y_test.shape)

# Loading embeddings matrix
emb_fn = "msrpc_{}_{}_{}_{}_{}.pickle".format(pp_name, lower_opt, emb_name,
                                              emb_opt, version)
[embedding_matrix, unknown_words] = pickle.load(open("./data/" + emb_fn, 'rb'))
vocab_size = embedding_matrix.shape[0]
Example #16
print(f"Vocab Size with >= {threshold} is {len(vocab)}")


#Loading Image Encodings
read_file = open("img_encoded/img_inceptionv3_encoding.pkl", "rb")
img_encodings = pickle.load(read_file)
read_file.close()


print("Total Images : ", len(img_encodings))

# print(img_encodings[list(img_encodings.keys())[0]]) : Prints first img numpy array


#Caption Dataset
train_df,test_df = generate_dataset()

############# Data Specifications ########################
print("Unique Train Images :", len(set(train_df['image'])))
print("Unqiue Test Images : ", len(set(test_df['image'])))

print("Total Train Instances :", train_df.shape[0])
print("Total Test Instances : ", test_df.shape[0])
###########################################################

train_df['caption'] = train_df['caption'].apply(lambda text: "<SOS> " + text + " <EOS>")
test_df['caption'] = test_df['caption'].apply(lambda text: "<SOS> " + text + " <EOS>")

train_descriptions = train_df.groupby(['image']).apply(lambda sub_df : sub_df["caption"].to_list()).to_dict()
test_descriptions = test_df.groupby(['image']).apply(lambda sub_df : sub_df["caption"].to_list()).to_dict()
Example #17
import numpy as np
import utils
from numba import int64
from numba.typed import List
import numba_test

X = np.load("data/wiki/wiki.npy")
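# split the matrix with a 0.6 training fraction and run the Numba SGD routine on the training portion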
X_train, X_validation, X_test, test_list = utils.generate_dataset(X, 0.6)
numba_test.SGD_algorithm(X_train, 1e-4, 1e-4, 16, 1e-4, 100)
Example #18
    netG.load_state_dict(torch.load(args.netG))
    print(netG)
    if args.cuda:
        netG.cuda()

###
# Create Generative validation set and Generative training set if necessary.
###

# If we need one day a validation set on generated data.
# print('Create Validation set with generated data ...')
# validloader_gen = utils.generate_dataset(netG, 10000, args.batchSize, args.workers, args.nz, n_class)

if args.training_size != -1 and not args.train_real:
    trainloader_gen = utils.generate_dataset(netG, args.training_size,
                                             args.batchSize, args.workers,
                                             args.nz, n_class)

if args.cuda:
    model.cuda()

optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       betas=(args.beta1, 0.999))
criterion = nn.CrossEntropyLoss()


def train(epoch, train_loader=None):
    model.train()
    if train_loader is not None:
        for batch_idx, (data, target) in enumerate(train_loader):
Example #19
def create_score_map(test_target,
                     data_name,
                     config,
                     ratio,
                     model_path=None,
                     save_score_map=False,
                     save_image=False):
    MODEL_DIR = '.\\logs'
    # check config
    utils.check_config(config)
    # create model
    model_inference = modellib.MaskRCNN(mode="inference",
                                        config=config,
                                        model_dir=MODEL_DIR)
    if model_path is None:
        model_path = model_inference.find_last()
    print("Loading weights from ", model_path)
    model_inference.load_weights(model_path, by_name=True)
    # generate dataset
    # check whether data_name encodes a padding value ('pad')
    if 'pad' in data_name:
        _start, _end = re.search('pad', data_name).span()
        data_pad = int(data_name[:_start].split('_')[-1])
    else:
        data_pad = 0
    print('test_target: %s, data_pad: %s' % (test_target, data_pad))
    dataset_test = utils.generate_dataset(test_target, data_name)
    image_shape = utils.get_image_shape(test_target)
    idsNum = len(dataset_test.image_ids)

    # generate score_map_total
    score_map_total = np.zeros(
        (image_shape[1] // ratio, image_shape[0] // ratio,
         config.NUM_CLASSES - 1),
        dtype=np.float32)

    for image_id in dataset_test.image_ids[:]:
        if image_id % 50 == 0:
            print('%s/%s' % (image_id, idsNum))
        # generate image
        image_path = dataset_test.image_info[image_id]['path']
        image = skimage.io.imread(image_path)
        # generate LMB
        LMB_path = dataset_test.image_info[image_id]['LMB_path']
        LMB = skimage.io.imread(LMB_path)
        # generate x1,y1,interval
        x1, y1, interval = image_path.split('\\')[-1].split('.')[-2].split(
            '_')[2:5]

        def _ratio(x):
            return int(x) // ratio

        x1_ratio, y1_ratio, interval_ratio = list(
            map(_ratio, [x1, y1, interval]))
        x1_ratio -= data_pad // ratio
        y1_ratio -= data_pad // ratio
        score_map_total_x1, score_map_total_y1 = x1_ratio, y1_ratio
        score_map_total_x2 = score_map_total_x1 + interval_ratio
        score_map_total_y2 = score_map_total_y1 + interval_ratio
        score_map_x1, score_map_y1 = 0, 0
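        # if the tile starts above or left of the full map, clamp the destination origin
        # and skip the out-of-range part of score_map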
        if x1_ratio < 0:
            score_map_total_x1 = 0
            score_map_x1 = -x1_ratio
        if y1_ratio < 0:
            score_map_total_y1 = 0
            score_map_y1 = -y1_ratio

        # predict results
        results = model_inference.detect([image], verbose=0)
        r = results[0]

        for n, class_id in enumerate(r['class_ids']):
            # generate mask_n
            mask_n = r['masks'][:, :, n].astype(np.float32)
            mask_n = mask_n * LMB
            if ratio != 1:
                mask_n = cv2.resize(mask_n, (interval_ratio, interval_ratio),
                                    interpolation=cv2.INTER_NEAREST)
            # generate score_map
            score_n = r['scores'][n]
            score_map = mask_n * score_n
            # write the element-wise maximum of score_map and the existing score_map_total values back into score_map_total
            score_map_total[score_map_total_y1:score_map_total_y2,
                            score_map_total_x1:score_map_total_x2,
                            class_id - 1] = np.maximum(
                                score_map[score_map_y1:, score_map_x1:],
                                score_map_total[
                                    score_map_total_y1:score_map_total_y2,
                                    score_map_total_x1:score_map_total_x2,
                                    class_id - 1])

    # save phase
    save_dir = '.\\merge_submit'
    if not os.path.exists(save_dir):
        os.mkdir(save_dir)
    model_stamp = utils.create_model_stamp(model_path)
    image_name = '%s_%ssmall_%spad' % (test_target, ratio, data_pad)
    # save score_map_total as npy
    if save_score_map:
        npy_save_path = os.path.join(
            save_dir, '%s_scoremap_%s.npy' % (image_name, model_stamp))
        np.save(npy_save_path, score_map_total)
        print(npy_save_path, 'has been saved successfully.')
    # save and show score_map as png by class
    if save_image:
        rows = config.NUM_CLASSES - 1
        fig, axs = plt.subplots(rows, 1, figsize=(16, 16 * rows))

        if ratio == 1:
            image_path = ".\\offical_data\\jingwei_round1_test_a_20190619\\%s.png" % test_target
        else:
            image_path = ".\\overview\\%s_%ssmall.png" % (test_target, ratio)
        Image.MAX_IMAGE_PIXELS = None
        image_frame_oringin = skimage.io.imread(image_path)

        for class_id in range(1, config.NUM_CLASSES):
            score_map = score_map_total[:, :, class_id - 1]
            image_frame = image_frame_oringin.copy()
            color = utils.COLOR_MAP[class_id]
            for c in range(3):
                image_frame[:, :, c] = image_frame[:, :, c] * (
                    1 - score_map) + score_map * color[c]

            axs[class_id - 1].imshow(image_frame)
            image_save_path = os.path.join(
                save_dir,
                '%s_class%d_%s.png' % (image_name, class_id, model_stamp))
            skimage.io.imsave(image_save_path, image_frame)
            print(image_save_path, 'has been saved successfully.')

    return score_map_total, model_path
Example #20

if __name__ == '__main__':
    torch.manual_seed(7)

    A, X, means, stds = load_metr_la_data()

    split_line1 = int(X.shape[2] * 0.6)
    split_line2 = int(X.shape[2] * 0.8)

    train_original_data = X[:, :, :split_line1]
    val_original_data = X[:, :, split_line1:split_line2]
    test_original_data = X[:, :, split_line2:]

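    # build sliding-window (input, target) samples for each split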
    training_input, training_target = generate_dataset(
        train_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    val_input, val_target = generate_dataset(
        val_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    test_input, test_target = generate_dataset(
        test_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)

    A_wave = get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave)

    A_wave = A_wave.to(device=args.device)
Example #21
args = parser.parse_args()
args.device = None
if args.enable_cuda and torch.cuda.is_available():
    args.device = torch.device('cuda')
    print("Use GPU.")
else:
    args.device = torch.device('cpu')
    print("Use CPU.")

if __name__ == '__main__':
    torch.manual_seed(7)
    A, X, means, stds = load_metr_la_data()
    split_line2 = int(X.shape[2] * 0.8)
    test_original_data = X[:, :, split_line2:]
    test_input, test_target = generate_dataset(
        test_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    print("INFO: Test data load finish!")
    A_wave = get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave)

    A_wave = A_wave.to(device=args.device)

    net = STGCN(A_wave.shape[0], test_input.shape[3], num_timesteps_input,
                num_timesteps_output).to(device=args.device)

    net.load_state_dict(torch.load('parameter.pkl'))
    print("INFO: Load model finish!")

    loss_criterion = nn.MSELoss()
Example #22
import utils

model_file = 'DeepPoemModel.h5'
checkpoint_dir = './training_checkpoints'

batch_size = 64
epochs = 50
learning_rate = 0.001
drop_rate = 0.1
word_vec_size = 256
rnn_size = 1024
rnn_layers = 3
dense_layers = 1

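# build the word/index vocabulary mappings and the training sequences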
idx2word, word2idx, x_train, y_train = utils.generate_dataset(True)
vob_size = len(idx2word)


def create_model_cell():
    input_data = layers.Input(shape=(None, ))
    initial_state_1 = [
        layers.Input(shape=(128, )),
        layers.Input(shape=(128, ))
    ]
    initial_state_2 = [
        layers.Input(shape=(128, )),
        layers.Input(shape=(128, ))
    ]
    x = layers.Embedding(input_dim=vob_size,
                         output_dim=word_vec_size)(input_data)
Example #23
    np.save("val_cd.npy", X[:, :, split_line1:split_line2])
    np.save("test_cd.npy", X[:, :, split_line2:])
    means = np.mean(X[:, :, :split_line1], axis=(0, 2))
    stds = np.std(X[:, :, :split_line1], axis=(0, 2))
    X = X - means[0]
    X = X / stds[0]
    print(means)
    print(stds)
    print(X.shape)

    train_original_data = X[:, :, :split_line1]
    val_original_data = X[:, :, split_line1 - 1008:split_line2]
    test_original_data = X[:, :, split_line2 - 1008:]

    training_input, training_daily_input, training_weekly_input, training_coarse_input, training_target = generate_dataset(
        train_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    val_input, val_daily_input, val_weekly_input, val_coarse_input, val_target = generate_dataset(
        val_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)
    test_input, test_daily_input, test_weekly_input, test_coarse_input, test_target = generate_dataset(
        test_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output)

    print(training_input.shape)
    print(training_target.shape)
    if args.model == 'Traj-net':
        net = PCN(X.shape[0], training_input.shape[3], num_timesteps_input,
                  num_timesteps_output).to(device=args.device)