Example #1
    def setUp(self):
        odx = ODX(0, 1)
        odx.load_gtsf()
        day = dt.datetime.strptime("01/30/18 00:00", "%m/%d/%y %H:%M")
        megas = odx.preprocess_gtsf(day)
        builder = NetworkBuilder(700)
        self.net = builder.build(megas, 1)
Example #2
def main():

    sess = tf.Session()

    image = read_image('../data/heart.jpg')
    image = np.reshape(image, [1, 224, 224, 3])  # type: numpy.ndarray
    image = image.astype(np.float32)  # astype returns a copy, so reassign it

    parser = Parser('../data/alexnet.cfg')
    network_builder = NetworkBuilder("test")  # type: NetworkBuilder
    network_builder.set_parser(parser)
    network = network_builder.build()  # type: Network
    network.add_input_layer(InputLayer(tf.float32, [None, 224, 224, 3]))
    network.add_output_layer(OutputLayer())
    network.connect_each_layer()

    sess.run(tf.global_variables_initializer())
    fc_layer = sess.run(network.output, feed_dict={network.input: image})
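Note: this snippet (like Examples #4 and #5) uses the TensorFlow 1.x graph/session API. Under TensorFlow 2.x the same symbols live under tf.compat.v1; a minimal sketch of the shim is shown below (the project-specific Parser/NetworkBuilder/layer classes are assumed to build a plain TF graph and are left out):

import tensorflow as tf

# TF 2.x sketch: restore graph-mode semantics, then v1-style code like the
# snippet above runs with its symbols moved under tf.compat.v1.
tf.compat.v1.disable_eager_execution()
sess = tf.compat.v1.Session()
input_ph = tf.compat.v1.placeholder(tf.float32, [None, 224, 224, 3])
sess.run(tf.compat.v1.global_variables_initializer())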
Example #3
    def __init__(self, start_node: int, end_node: int, render: bool):
        self.__network = NetworkBuilder().build_network
        print("Graph representation build done...\n")
        print("Original node matrix: ")
        print(self.__network.node_matrix.matrix)
        print("######################################################")
        print("Virtual nodes: ")
        print([
            str(node.parent_id) + "_" +
            str(node.id % (len(constants.REQUIRED_SERVICE_FUNCTIONS) + 1))
            for node in self.__network.virtual_nodes
        ])
        print("######################################################")

        self.__start_node = start_node
        self.__end_node = end_node
        self.view = View(self.__network, render)
        self.view.render(True)
Example #4
def main():
    parser = Parser('../data/alexnet.cfg')
    network_builder = NetworkBuilder("test")
    mnist = input_data.read_data_sets("F:/tf_net_parser/datasets/MNIST_data/",
                                      one_hot=True)  # read the MNIST data
    network_builder.set_parser(parser)
    network = network_builder.build()  # type: Network
    network.add_input_layer(InputLayer(tf.float32, [None, 28, 28, 1]))
    network.add_output_layer(OutputLayer())
    network.set_labels_placeholder(tf.placeholder(tf.float32, [None, 10]))
    network.connect_each_layer()
    network.set_accuracy()
    network.init_optimizer()
    train_tool = TrainTool()
    train_tool.bind_network(network)
    sess = tf.Session()
    sess.run(tf.global_variables_initializer())  # replaces the deprecated initialize_all_variables()
    for i in range(300):
        batch = mnist.train.next_batch(100)
        feed_dict = {
            network.input: np.reshape(batch[0], [-1, 28, 28, 1]),
            network.labels: batch[1]
        }
        train_tool.train(sess, network.output, feed_dict=feed_dict)
        if (i + 1) % 100 == 0:
            train_tool.print_accuracy(sess, feed_dict)
            train_tool.save_model_to_pb_file(
                sess,
                '../pb/alexnet-' + str(i + 1) + '/',
                input_data={'input': network.input},
                output={'predict-result': network.output})
            # train_tool.save_ckpt_model('f:/tf_net_parser/save_model/model', sess, global_step=(i+1))

    batch_test = mnist.test.next_batch(100)
    feed_dict = {
        network.input: np.reshape(batch_test[0], [100, 28, 28, 1]),
        network.labels: batch_test[1]
    }
    train_tool.print_test_accuracy(sess, feed_dict)
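If save_model_to_pb_file writes a standard SavedModel directory (an assumption; TrainTool is project-specific and its export format is not shown here), the exported model could later be reloaded for inference with the stock TF 1.x loader, roughly:

import tensorflow as tf

# Hypothetical reload of the directory exported above; the tensor names inside
# the signature depend entirely on how TrainTool wrote it.
with tf.Session(graph=tf.Graph()) as sess:
    tf.saved_model.loader.load(sess, [tf.saved_model.tag_constants.SERVING],
                               '../pb/alexnet-300/')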
Example #5
    def __init__(self, input_dim, output_dim, training=True):
        self.training = training
        nb = NetworkBuilder()
        with tf.name_scope("Input"):
            self.input = tf.placeholder(tf.float32,
                                        shape=[None, input_dim, input_dim, 1],
                                        name="input")

        with tf.name_scope("Output"):
            self.output = tf.placeholder(tf.float32,
                                         shape=[None, output_dim],
                                         name="output")

        with tf.name_scope("ImageModel"):
            model = self.input
            model = nb.add_batch_normalization(model, self.training)
            model = nb.add_conv_layer(model,
                                      output_size=64,
                                      feature_size=(4, 4),
                                      padding='SAME',
                                      activation=tf.nn.relu)
            model = nb.add_max_pooling_layer(model)
            model = nb.add_dropout(model, 0.1, self.training)
            model = nb.add_conv_layer(model,
                                      64,
                                      feature_size=(4, 4),
                                      activation=tf.nn.relu,
                                      padding='VALID')
            model = nb.add_max_pooling_layer(model)
            model = nb.add_dropout(model, 0.3, self.training)
            model = nb.flatten(model)
            model = nb.add_dense_layer(model, 256, tf.nn.relu)
            model = nb.add_dropout(model, 0.5, self.training)
            model = nb.add_dense_layer(model, 64, tf.nn.relu)
            model = nb.add_batch_normalization(model, self.training)
            self.logits = nb.add_dense_layer(model,
                                             output_dim,
                                             activation=tf.nn.softmax)
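Because the last dense layer already applies a softmax, self.logits holds class probabilities rather than raw logits, so a downstream loss must not apply softmax again. A minimal sketch of a compatible loss (the actual training code is not part of the snippet, so this wiring is an assumption):

# Hypothetical loss for the model above: self.output holds one-hot labels and
# self.logits holds softmax probabilities, so use plain cross-entropy rather
# than a *_with_logits loss.
cross_entropy = -tf.reduce_sum(self.output * tf.log(self.logits + 1e-10), axis=1)
loss = tf.reduce_mean(cross_entropy)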
Example #6
    def test_build(self):
        builder = NetworkBuilder(700)
        net = builder.build(self.megas, 1)
Example #7
    def __init__(self,
                 env_type=Env.MULTI,
                 env_dict=None,
                 env_file_name=None,
                 training_dict=None,
                 training_file_name=None,
                 env_dict_string=False,
                 training_name=None,
                 model_load_name=None,
                 load_model=False,
                 network_type=Network.SA_TO_Q,
                 custom_network=None,
                 training_mode=Modes.TRAINING,
                 use_tensorboard=True,
                 print_training=True,
                 require_all_params=False,
                 gym_env_name=None):

        self.env_type = env_type
        self.training_name = training_name
        self.training_mode = training_mode
        self.network_type = network_type
        self.print_training = print_training
        self.env_file_name = env_file_name
        self.env_dict = env_dict
        self.env_dict_string = env_dict_string
        self.gym_env_name = gym_env_name
        #check for valid environment settings
        if env_type == Env.LEGACY:
            self.env_import = environment
        elif env_type == Env.MULTI:
            self.env_import = multienvironment
        else:
            #A gym environment
            self.env_import = None

        #remove after testing
#        #Load environment arguments
#        if env_file_name is not None:
#            self.env_args = self.read_dict('./args/environment/' + env_file_name + '.txt')
#            if env_dict is not None:
#                for key in env_dict:
#                    self.env_args[key] = env_dict[key]
#        elif env_dict is not None:
#            if env_dict_string:
#                env_dict = eval(env_dict)
#            self.env_args = env_dict
#        else:
#            self.env_args = None
#
#        #initialize the environment
#        self.env = self.env_import.Environment(**self.env_args)
        self.initialize_environment()
        #self.num_actions = self.env.action_space()
        self.num_actions = self.action_space()

        #load training parameters and set as class fields
        if training_mode is not Modes.TESTING:
            #Load training arguments
            if training_file_name is not None:
                self.parameter_dict = self.read_dict('./args/training/' +
                                                     training_file_name +
                                                     '.txt')
                if training_dict is not None:
                    for key in training_dict:
                        self.parameter_dict[key] = training_dict[key]
            elif training_dict is not None:
                self.parameter_dict = training_dict
            else:
                self.parameter_dict = None
                raise Exception('no training argument parameters specified')

            #instead of a boolean which turns requirement on/off, there will be a list of params that are crucial for testing
            #if require_all_params:
            #  for parameter in Parameters:
            #   if parameter not in parameter_dict:
            #    raise Exception('parameter list is missing required parameters for training')

            #initialize all training parameters as None, so those that are not used are still defined
            for parameter in Parameters:
                setattr(self, parameter.name, None)
            #convert parameter list to class fields
            for parameter in self.parameter_dict:
                #in case string based parameters are supported, this may not be needed
                setattr(self, parameter.name, self.parameter_dict[parameter])

        #Set up the network, custom_network should pass an uncompiled model built with keras layers
        #either load a pre-trained model or create a new model
        if load_model:
            self.model = tf.keras.models.load_model('./models/' +
                                                    model_load_name + '.h5')
        else:
            #create a new model following one of the preset models, or create a new custom model
            #image_shape = np.shape(self.env.screenshot())
            image_shape = self.observation_space()
            num_actions = self.num_actions
            if custom_network is not None:

                argument_dict = {
                    'image_shape': image_shape,
                    'num_actions': num_actions
                }
                if network_type == Network.SM_TO_QA:
                    argument_dict['stack_size'] = self.env.stacker.stack_size
                if network_type == Network.SR_TO_QA:
                    argument_dict['trace_length'] = self.TRACE_LENGTH
                network_builder = NetworkBuilder(custom_network, network_type,
                                                 argument_dict)
                self.model = network_builder.get_model()
            else:
                if self.env_type is not Env.GYM:
                    #fix this
                    if self.env.frame_stacking:
                        #a tuple
                        base_size = image_shape[0:2]
                        #a scalar
                        channels = image_shape[2]
                        stack_size = self.env.stacker.stack_size
                        #example stack- image dimensions: 30 x 40, stack size: 4, channels: 3
                        if self.env.concatenate:
                            #should be width by height by (channels * stack size)
                            #so 30 x 40 x 12
                            image_shape = base_size + (channels * stack_size, )
                        #else:
                        #should be stack size by height by width by channels
                        #so 4 x 30 x 40 x 3
                        #image_shape = (stack_size,) + base_size
                    #print(image_shape)
                kernel_size = (5, 5)

                #default models for each network type
                if network_type == Network.SA_TO_Q:

                    image_input = tf.keras.Input(shape=image_shape)

                    conv1 = tf.keras.layers.Conv2D(
                        32,
                        kernel_size=kernel_size,
                        activation=tf.keras.activations.relu,
                        strides=1)(image_input)
                    pooling1 = tf.keras.layers.MaxPooling2D(
                        pool_size=(2, 2))(conv1)
                    drop1 = tf.keras.layers.Dropout(.25)(pooling1)

                    conv2 = tf.keras.layers.Conv2D(
                        64,
                        kernel_size=kernel_size,
                        strides=1,
                        activation=tf.keras.activations.relu)(drop1)
                    pooling2 = tf.keras.layers.MaxPooling2D(
                        pool_size=(2, 2))(conv2)
                    drop2 = tf.keras.layers.Dropout(0.25)(pooling2)

                    flat = tf.keras.layers.Flatten()(drop2)
                    conv_dense = tf.keras.layers.Dense(
                        100, activation=tf.keras.activations.relu)(flat)

                    action_input = tf.keras.Input(shape=(num_actions, ))
                    action_dense = tf.keras.layers.Dense(
                        num_actions**2,
                        activation=tf.keras.activations.relu)(action_input)

                    merged_dense = tf.keras.layers.concatenate(
                        [conv_dense, action_dense])
                    dense1 = tf.keras.layers.Dense(
                        10, activation=tf.keras.activations.relu)(merged_dense)
                    output = tf.keras.layers.Dense(
                        1, activation=tf.keras.activations.linear)(dense1)

                    self.model = tf.keras.Model(
                        inputs=[image_input, action_input], outputs=output)

                elif network_type == Network.S_TO_QA:
                    image_input = tf.keras.Input(shape=image_shape)

                    conv1 = tf.keras.layers.Conv2D(
                        32,
                        kernel_size=kernel_size,
                        activation=tf.keras.activations.relu,
                        strides=1)(image_input)
                    pooling1 = tf.keras.layers.MaxPooling2D(
                        pool_size=(2, 2))(conv1)
                    drop1 = tf.keras.layers.Dropout(.25)(pooling1)

                    conv2 = tf.keras.layers.Conv2D(
                        64,
                        kernel_size=kernel_size,
                        strides=1,
                        activation=tf.keras.activations.relu)(drop1)
                    pooling2 = tf.keras.layers.MaxPooling2D(
                        pool_size=(2, 2))(conv2)
                    drop2 = tf.keras.layers.Dropout(0.25)(pooling2)

                    flat = tf.keras.layers.Flatten()(drop2)
                    conv_dense = tf.keras.layers.Dense(
                        100, activation=tf.keras.activations.relu)(flat)
                    output = tf.keras.layers.Dense(
                        num_actions,
                        activation=tf.keras.activations.linear)(conv_dense)

                    self.model = tf.keras.Model(inputs=image_input,
                                                outputs=output)

                elif network_type == Network.SM_TO_QA:
                    #concat should be false
                    stack_size = self.env.stacker.stack_size
                    input_layer_list = []
                    dense_layer_list = []
                    for i in range(stack_size):

                        image_input = tf.keras.Input(shape=image_shape)

                        conv1 = tf.keras.layers.Conv2D(
                            32,
                            kernel_size=kernel_size,
                            activation=tf.keras.activations.relu,
                            strides=1)(image_input)
                        pooling1 = tf.keras.layers.MaxPooling2D(
                            pool_size=(2, 2))(conv1)
                        drop1 = tf.keras.layers.Dropout(.25)(pooling1)

                        conv2 = tf.keras.layers.Conv2D(
                            64,
                            kernel_size=kernel_size,
                            strides=1,
                            activation=tf.keras.activations.relu)(drop1)
                        pooling2 = tf.keras.layers.MaxPooling2D(
                            pool_size=(2, 2))(conv2)
                        drop2 = tf.keras.layers.Dropout(0.25)(pooling2)

                        flat = tf.keras.layers.Flatten()(drop2)

                        #add to layer lists
                        input_layer_list.append(image_input)
                        dense_layer_list.append(flat)

                    merged_dense = tf.keras.layers.concatenate(
                        dense_layer_list)
                    dense1 = tf.keras.layers.Dense(
                        100,
                        activation=tf.keras.activations.relu)(merged_dense)
                    dense2 = tf.keras.layers.Dense(
                        100, activation=tf.keras.activations.relu)(dense1)
                    output = tf.keras.layers.Dense(
                        num_actions,
                        activation=tf.keras.activations.linear)(dense2)

                    self.model = tf.keras.Model(inputs=input_layer_list,
                                                outputs=output)
                else:
                    raise Exception(
                        'invalid network type or no default model for network type'
                    )

        #other variables for training
        self.target_model = None
        self.tensorboard = None
        self.replay_memory = None
        self.reward_list = []
        self.epsilon = 1
        self.epsilon_decay_function = None

        #self.model should now be defined, compile the model if training
        if training_mode is not Modes.TESTING:
            #copy the model if using double q learning
            if self.DOUBLE:
                self.target_model = tf.keras.models.clone_model(self.model)

            #redo this logic eventually, support all tf optimizers
            if self.OPTIMIZER == Optimizer.ADAM:
                self.OPTIMIZER = tf.keras.optimizers.Adam
            elif self.OPTIMIZER == Optimizer.SGD:
                self.OPTIMIZER = tf.keras.optimizers.SGD
            if self.OPTIMIZER is None:
                self.OPTIMIZER = tf.keras.optimizers.Adam
            self.model.compile(loss=tf.keras.losses.mean_squared_error,
                               optimizer=self.OPTIMIZER(lr=self.ALPHA))
            if self.DOUBLE:
                self.update_target()
            #test custom directory thing

            #initialize epsilon decay function
            #right now, all functions should have 2 args: epsilon and the total number of epochs
            if self.EPSILON_DECAY == Decay.LINEAR:
                self.epsilon_decay_function = self.linear_decay
            else:
                raise Exception('Decay function not found')

            if use_tensorboard:
                self.tensorboard = tf.keras.callbacks.TensorBoard(
                    log_dir='logs/{}/{}'.format(training_name, time()),
                    batch_size=self.BATCH_SIZE,
                    write_grads=True,
                    write_images=True)
                self.tensorboard.set_model(self.model)
            if self.UPDATE_FREQUENCY is None:
                self.UPDATE_FREQUENCY = 1
Example #8
#
# painter.draw_raw_road_network(splitter.old_road_network)
# painter.draw_calculated_road_network(splitter.road_network)
#
# cost = utils.compute_road_network_cost(splitter.road_network)
#
# print(f"Стоимость строительства дорожной сети: {cost}")
from network_builder import NetworkBuilder
#
t, q = utils.read_terminal_points("input_terminal_points")
p = np.vstack((t, q))

g = Grid(p, set(range(len(t), len(p))))
g.generate()
# painter.draw_grid(g)
#
# print(g.distance_matrix[0, 1])

nb = NetworkBuilder(g)
network = nb.build_network()

splitter = EdgesSplitter(network)
splitter.calculate()

painter.draw_raw_road_network(splitter.old_road_network)
painter.draw_calculated_road_network(splitter.road_network)
#
cost = utils.compute_road_network_cost(splitter.road_network)

print(f"Стоимость строительства дорожной сети: {cost}")
Example #9
    def do(self, matrix, transcriptome, metabolome, depth, filter, limit, queries, 
           subparser_name, starting_compounds, steps, number_of_queries, output_directory):
        '''
        Parameters
        ----------
        matrix
        transcriptome
        metabolome
        depth
        filter
        limit
        queries
        subparser_name
        starting_compounds
        steps
        number_of_queries
        output_directory
        '''

        nb = NetworkBuilder(self.metadata.keys())
        km = KeggMatrix(matrix, transcriptome)

        abundances_metagenome = \
                {key:km.group_abundances(self.metadata[key],
                                         km.reaction_matrix) 
                 for key in self.metadata.keys()}

        if transcriptome:
            abundances_transcriptome = \
                    {key:km.group_abundances(self.metadata[key],
                                             km.reaction_matrix_transcriptome) 
                     for key in self.metadata.keys()}            
            abundances_expression = \
                    {key:km.group_abundances(self.metadata[key],
                                             km.reaction_matrix_expression) 
                     for key in self.metadata.keys()}
        else:
            abundances_transcriptome = None
            abundances_expression    = None

        if metabolome:
            abundances_metabolome = Matrix(metabolome) 
            ### ~ TODO: This WILL NOT WORK - MATRIX is no longer an existing class.
            ### ~ TODO: I've added a note in the help for network analyzer
            ### ~ TODO: that warns the user about this.
        else:
            abundances_metabolome = None

        if subparser_name==self.TRAVERSE:
            logging.info('Traversing network')
            output_lines = \
                            nb.traverse(abundances_metagenome,
                                        abundances_transcriptome,
                                        limit,
                                        filter,
                                        starting_compounds,
                                        steps,
                                        number_of_queries)
            self._write_results(os.path.join(output_directory, self.TRAVERSE_OUTPUT_FILE), output_lines)

        elif subparser_name==self.EXPLORE:
            logging.info("Using supplied queries (%s) to explore network" \
                                                        % queries)
            network_lines, node_metadata = \
                            nb.query_matrix(abundances_metagenome, 
                                            abundances_transcriptome,
                                            abundances_expression,
                                            queries,
                                            depth)

            self._write_results(os.path.join(output_directory, self.NETWORK_OUTPUT_FILE), network_lines)
            self._write_results(os.path.join(output_directory, self.METADATA_OUTPUT_FILE), node_metadata)

        elif subparser_name==self.PATHWAY:
            logging.info('Generating pathway network')

            network_lines, node_metadata = \
                            nb.pathway_matrix(abundances_metagenome, 
                                              abundances_transcriptome,
                                              abundances_expression,
                                              abundances_metabolome,
                                              limit,
                                              filter)

            self._write_results(os.path.join(output_directory, self.NETWORK_OUTPUT_FILE), network_lines)
            self._write_results(os.path.join(output_directory, self.METADATA_OUTPUT_FILE), node_metadata)
Example #10
from network_builder import NetworkBuilder


nets_available = ["Net1", "Net2", "Net4", "Net5", "Net7", "Net8", "Net9", "Net10", "Net10v2", "Net11", "Net11v2", "Net11v3"]

images_in = 10
for name in nets_available:
    if name == "Net9":
        
        bn = NetworkBuilder(name, depth=32,num_input_im = images_in, max_batch_size = 6, epochs = 500, steps_per_epoch = 50).build_net()
    else:
        bn = NetworkBuilder(name, depth=32,max_batch_size = 6, epochs = 500, steps_per_epoch = 5).build_net()
    bn.build()
    bn.fit()
    bn.gen_raport()
    del bn
Example #11
    def __init__(self):
        self.dataset = DatasetLoader()
        self.networkbuilder = NetworkBuilder()