Example #1
import argparse
import numpy as np
import tensorflow as tf
import tqdm
from data_providers import QuickDrawImageDataProvider
from network_builder import ClassifierNetworkGraph
from utils.parser_utils import ParserClass
from utils.storage import build_experiment_folder, save_statistics

tf.reset_default_graph()  # resets any previous graphs to clear memory
parser = argparse.ArgumentParser(
    description='Welcome to CNN experiments script'
)  # generates an argument parser
parser_extractor = ParserClass(
    parser=parser)  # creates a parser class to process the parsed input

batch_size, seed, epochs, logs_path, continue_from_epoch, tensorboard_enable, batch_norm, \
strided_dim_reduction, experiment_prefix, dropout_rate_value, rnn_dropout_rate_value, layer_stage_sizes, \
rnn_cell_type, bidirectional, rnn_stage_sizes, conv_rnn_sizes, num_classes_use, inner_layer_depth, \
filter_size, num_dense_layers, num_dense_units, network_name, rotate = parser_extractor.get_argument_variables()
# unpacks the parsed command-line argument values into local variables

convnet_desc = ""
if batch_norm:
    convnet_desc = convnet_desc + "BN"

for ls in layer_stage_sizes:
    convnet_desc = "{}_{}".format(convnet_desc, ls)

if network_name == 'fcn':
    # the source listing breaks off mid-call here; a plausible completion, assuming
    # the name combines the prefix, the conv description and the dense layer count:
    experiment_name = "exp{}_{}_{}_layers".format(experiment_prefix, convnet_desc,
                                                  num_dense_layers)
Example #2
import argparse
import numpy as np
import tensorflow as tf
import tqdm
from data_providers import CIFAR10DataProvider
from network_builder import ClassifierNetworkGraph
from utils.parser_utils import ParserClass
from utils.storage import build_experiment_folder, save_statistics

tf.reset_default_graph()  # resets any previous graphs to clear memory
parser = argparse.ArgumentParser(
    description='Welcome to CNN experiments script'
)  # generates an argument parser
parser_extractor = ParserClass(
    parser=parser)  # creates a parser class to process the parsed input

batch_size, seed, epochs, logs_path, continue_from_epoch, tensorboard_enable, batch_norm, \
strided_dim_reduction, experiment_prefix, dropout_rate_value = parser_extractor.get_argument_variables()
# unpacks the parsed command-line argument values into local variables

experiment_name = "experiment_{}_batch_size_{}_bn_{}_mp_{}".format(
    experiment_prefix, batch_size, batch_norm, strided_dim_reduction)
#  generate experiment name
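# e.g. prefix "cnn", batch size 64, batch_norm True, strided_dim_reduction False
# (illustrative values, not from the source) yield
# "experiment_cnn_batch_size_64_bn_True_mp_False"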

rng = np.random.RandomState(seed=seed)  # set seed

train_data = CIFAR10DataProvider(which_set="train",
                                 batch_size=batch_size,
                                 rng=rng)
val_data = CIFAR10DataProvider(which_set="valid",
                               batch_size=batch_size,
                               rng=rng)  # completion assumed, mirroring the train_data call above
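The providers are consumed by iteration; a minimal sketch of the epoch loop such scripts typically run, assuming each provider yields (inputs, targets) NumPy batches and exposes a num_batches attribute (neither is confirmed by the listing above):

for epoch in range(epochs):
    with tqdm.tqdm(total=train_data.num_batches) as pbar:
        for batch_inputs, batch_targets in train_data:  # assumed (inputs, targets) batch API
            # one optimisation step would run here, e.g. session.run on the graph's train op
            pbar.update(1)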
Example #3
# The source listing begins mid-function; the signature below is an assumed
# reconstruction, and the imports cover the names used later in this listing:
import argparse
import numpy as np
import tensorflow as tf
from data_providers import TwitterDataProvider
from utils.parser_utils import ParserClass


def get_vocabulary(vocab_processor):
    # build a word -> index mapping, reserving index 0 for the padding token
    vocab = {}
    words = []
    PADWORD = 'PADDING'
    vocab[PADWORD] = 0
    words.append(PADWORD)
    for word, index in vocab_processor.vocabulary_._mapping.items():
        vocab[word] = index + 1  # shift indices up so PADDING keeps index 0
        words.append(word)
    return vocab, words
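# Hypothetical usage (not in the source), assuming vocab_processor is a fitted
# tf.contrib.learn.preprocessing.VocabularyProcessor (TF 1.x):
#   vocab_processor = tf.contrib.learn.preprocessing.VocabularyProcessor(
#       max_document_length=50)
#   vocab_processor.fit(train_texts)
#   vocab, words = get_vocabulary(vocab_processor)  # index 0 is PADDING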


tf.reset_default_graph()  # resets any previous graphs to clear memory
parser = argparse.ArgumentParser(
    description='Welcome to CNN experiments script'
)  # generates an argument parser
parser_extractor = ParserClass(
    parser=parser)  # creates a parser class to process the parsed input

batch_size, seed, epochs, logs_path, continue_from_epoch, tensorboard_enable, embedding_dim, \
filter_sizes, num_filters, experiment_prefix, dropout_rate_value, pt_embeddings, static_embeddings, \
l2_norm, activation, typec, cell, hidden_unit, num_units = parser_extractor.get_argument_variables()

# unpacks the parsed command-line argument values into local variables

experiment_name = "experiment_{}_batch_size_{}_ptembed_{}".format(
    experiment_prefix, batch_size, pt_embeddings)
#  generate experiment name

rng = np.random.RandomState(seed=seed)  # set seed

train_data = TwitterDataProvider(which_set="train",
                                 batch_size=batch_size,
                                 rng=rng)  # completion assumed, following Example #2's pattern