Code Example #1
# NOTE: assumes module-level imports (networkx as nx) plus the project's Gns3 / random_graphs helpers.
def main():
    plot_options = {
        # 'node_color': 'lightblue',
        # 'node_size': 800,
        # 'width': 1.7,
        'with_labels': True
        # 'font_weight': 'bold'
    }
    new_switches = 10
    new_hubs = 10
    new_hosts = 100
    new_graphs = 10
    # randtree, randtree_pos, randtree_opt = random_graph(new_switches,
    #                                                     new_hubs,
    #                                                     new_hosts)
    # type_dict = nx.get_node_attributes(randtree, 'type')
    # pprint(list(randtree.nodes))

    # graph_path = '/home/akern/Documents/grafos/'
    graph_path = '/mnt/hgfs/Projeto Final Dissertacao/snakebones/grafos_rand/'
    file_name = \
        f'randomgraph_sw{new_switches:02}_hub{new_hubs:02}_host{new_hosts:03}_'
    # # generate and save graphs
    # graph_gen = \
    #     random_graphs(new_switches, new_hubs, new_hosts, many=new_graphs)
    # for i, (graph, places, options) in enumerate(graph_gen):
    #     nx.nx_pydot.write_dot(graph, f'{graph_path}{file_name}{i+1:002}.txt')

    # read and plot the graphs
    graph_list = list()
    for i in range(new_graphs):
        graph_loaded = nx.Graph(
            nx.nx_pydot.read_dot(f'{graph_path}{file_name}{i+1:002}.txt'))
        graph_list.append(graph_loaded)
        # plot_graph(graph_loaded, randtree_pos, randtree_opt)

    project_id = '389dde3d-08ac-447b-8d54-b053a3f6ed19'  # scritp-test.gns3
    nms_id = 'a296b0ec-209a-47a5-ae11-fe13f25e7b73'
    # curl "http://192.168.139.128:3080/v2/computes"
    # vm = Gns3('192.168.139.128')

    pc = Gns3('192.168.139.1', project_id=project_id)
    print("\nGNS3 PC: ")
    print(pc)
    # pprint(pc.version)
    # pprint(pc.computes)
    # pprint(pc.projects)
    # pprint(pc.nodes())
    # pprint(pc.nodes_amouts())

    # breakpoint()

    # Create nodes and links in GNS3
    for graph in graph_list[:2]:
        pc.nodes_from_graph(graph, subnets=3)
        pc.clear_links(keep=(nms_id, ))
        pc.links_from_graph(graph)
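        # Pause inside the loop so the topology just pushed to GNS3 can be inspected in the debugger.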
        breakpoint()

    pc.clear_links()
Code Example #2
    def sample_training_data_of_novel_categories(self, exp_id=0):
        nKnovel = self.nKnovel
        nKbase = self.nKbase
        nExemplars = self.nExemplars

        random.seed(exp_id)  # fix the seed for this experiment.
        breakpoint()
        # Ids of the base categories.
        Kbase = sorted(self.dataset_evaluation.labelIds_base)
        # Ids of the novel categories.
        Knovel = sorted(self.dataset_evaluation.labelIds_novel)
        assert len(Kbase) == nKbase and len(Knovel) == nKnovel
        Kall = Kbase + Knovel

        # Sample `nExemplars` number of training examples for each novel
        # category.
        train_examples = self.sample_training_examples_for_novel_categories(
            Knovel, nExemplars)

        breakpoint()
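        # Map every category id (base ids followed by novel ids) to a contiguous label index.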
        self.Kid2Label = {kid: label_idx for label_idx, kid in enumerate(Kall)}

        breakpoint()
        base_classes_subset = self.dataset_train_novel.base_classes_subset
        assert (len(
            set.intersection(
                set(Kall[:nKbase]),
                set(base_classes_subset))) == len(base_classes_subset))
        self.Kids_base_subset = sorted(
            [self.Kid2Label[kid] for kid in base_classes_subset])

        Kall = torch.LongTensor(Kall)
        images_train, labels_train = self.create_examples_tensor_data(
            train_examples)
        return images_train, labels_train, Kall, nKbase, nKnovel
Code Example #3
  def append_flipped_images(self):
    num_images = self.num_images
    widths = self._get_widths()

    for i in range(num_images):
      boxes = self.roidb[i]['boxes'].copy()
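      # Mirror the x-coordinates: with 0-indexed pixels a flipped x becomes width - x - 1,
      # so the old x2 yields the new x1 and vice versa.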
      oldx1 = boxes[:, 0].copy()
      oldx2 = boxes[:, 2].copy()
      boxes[:, 0] = widths[i] - oldx2 - 1
      boxes[:, 2] = widths[i] - oldx1 - 1

      try:
        assert (boxes[:, 2] >= boxes[:, 0]).all()
      except AssertionError:
        breakpoint()
        print('error')
        print(boxes[:, 2] >= boxes[:, 0])
        print(boxes)
        print(widths[i])

      entry = {'boxes': boxes,
               'gt_overlaps': self.roidb[i]['gt_overlaps'],
               'gt_classes': self.roidb[i]['gt_classes'],
               'flipped': True}
      if 'seg_map' in self.roidb[i]:
        entry['seg_map'] = self.roidb[i]['seg_map'][::-1, :]
      self.roidb.append(entry)
    self._image_index = self._image_index * 2
Code Example #4
def error(msg):
    # Report the error; in debug mode drop into the debugger via breakpoint() (Python 3.7+)
    # so the failing state can be inspected, otherwise abort.
    sys.stderr.write('ERROR at line ' + str(line_num) + ': ' + msg + '\n')
    if debug_mode:
        breakpoint()
    else:
        sys.exit()
Code Example #5
class Person:
    def __init__(self, name, age, birth_year, home_state):
        self.name = name
        # Validate the age; use 999 as a sentinel when it is out of range
        if 0 <= age <= 120:
            self.age = age
        else:
            self.age = 999

        self.birth_year = birth_year
        self.home_state = home_state

    # Define instance methods
    def greets(self):
        print(f'Hello Harun! My name is {self.name}, nice to meet you!')

    def had_birth(self):
        self.age += 1

    def where_you_from(self):
        print(f'I am from {self.home_state}!')


if __name__ == "__main__":

    person1 = Person("Elif", 38, 1981, "Wisconsin")
    person2 = Person("Musa", 41, 1979, "Wisconsin")
    breakpoint()
Code Example #6
def error(msg):
    sys.stderr.write("ERROR at line " + str(line_num) + ": " + msg + "\n")
    if debug_mode:
        breakpoint()
    else:
        sys.exit()
Code Example #7
# NOTE: excerpt from a TF 1.x / TF-Slim multi-GPU training script; assumes the usual module-level
# imports (tensorflow as tf, numpy as np, os, slim, model_deploy, the *_factory helpers) and FLAGS.
def train():
    #checkpoint_path_test = "/media/fuming/Black6TB/CMPL/Cross-Modal-Projection-Learning/flickr30k_resnet152_cmpm/checkpoint"
    #print(checkpoint_path_test)
    #print(tf.train.latest_checkpoint(checkpoint_path_test))
    if not FLAGS.dataset_dir:
        raise ValueError('You must supply the dataset directory with --dataset_dir')

    tf.logging.set_verbosity(tf.logging.INFO)
    with tf.Graph().as_default():
        #######################
        # Config model_deploy #
        #######################
        deploy_config = model_deploy.DeploymentConfig(
            num_clones=FLAGS.num_clones,
            clone_on_cpu=FLAGS.clone_on_cpu,
            replica_id=FLAGS.task,
            num_replicas=FLAGS.worker_replicas,
            num_ps_tasks=FLAGS.num_ps_tasks)

        # Create global_step
        with tf.device(deploy_config.variables_device()):
            # global_step = slim.create_global_step()
            global_step = tf.train.create_global_step()
        ######################
        # Select the dataset #
        ######################
        dataset = dataset_factory.get_dataset(
            FLAGS.dataset_name, FLAGS.split_name, FLAGS.dataset_dir)

        ###########################
        # Select the CNN network  #
        ###########################
        network_fn = nets_factory.get_network_fn(
            FLAGS.model_name,
            num_classes=None,
            weight_decay=FLAGS.weight_decay,
            is_training=True)

        #########################################
        # Configure the optimization procedure. #
        #########################################
        with tf.device(deploy_config.optimizer_device()):
            learning_rate = configure_learning_rate(dataset.num_samples, global_step)
            optimizer = configure_optimizer(learning_rate)

        #####################################
        # Select the preprocessing function #
        #####################################
        preprocessing_name = FLAGS.preprocessing_name
        image_preprocessing_fn = preprocessing_factory.get_preprocessing(
            preprocessing_name,
            is_training=True)

        ##############################################################
        # Create a dataset provider that loads data from the dataset #
        ##############################################################
        with tf.device(deploy_config.inputs_device()):
            examples_per_shard = 1024
            min_queue_examples = examples_per_shard * FLAGS.input_queue_memory_factor
            provider = slim.dataset_data_provider.DatasetDataProvider(
                dataset,
                num_readers=FLAGS.num_readers,
                common_queue_capacity=min_queue_examples + 3 * FLAGS.batch_size,
                common_queue_min=min_queue_examples)
            [image, label, text_id, text] = provider.get(['image', 'label', 'caption_ids', 'caption'])

            train_image_size = network_fn.default_image_size
            image = image_preprocessing_fn(image, train_image_size, train_image_size)

            # This function splits the text into an input sequence and a target sequence,
            # where the target sequence is the input sequence right-shifted by 1. Input and
            # target sequences are batched and padded up to the maximum length of sequences
            # in the batch. A mask is created to distinguish real words from padding words.
            # Note that the target sequence is used if performing caption generation
            seq_length = tf.shape(text_id)[0]
            input_length = tf.expand_dims(tf.subtract(seq_length, 1), 0)
            input_seq = tf.slice(text_id, [0], input_length)
            target_seq = tf.slice(text_id, [1], input_length)
            input_mask = tf.ones(input_length, dtype=tf.int32)

            print("initial input_seq is ********************\n")
            print(input_seq)
            print("*****************************************\n")

            print("initial input_mask is *******************\n")
            print(input_mask)
            print("*****************************************\n")
            
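            # Batch the examples and pad variable-length sequences to the longest one in the batch.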
            images, labels, input_seqs, target_seqs, input_masks, texts, text_ids = tf.train.batch(
                [image, label, input_seq, target_seq, input_mask, text, text_id],
                batch_size=FLAGS.batch_size,
                capacity=2 * FLAGS.num_preprocessing_threads * FLAGS.batch_size,
                dynamic_pad=True,
                name="batch_and_pad")

            batch_queue = slim.prefetch_queue.prefetch_queue(
                [images, labels, input_seqs, target_seqs, input_masks, texts, text_ids],
                capacity=16 * deploy_config.num_clones,
                num_threads=FLAGS.num_preprocessing_threads,
                dynamic_pad=True,
                name="perfetch_and_pad")

            images, labels, input_seqs, target_seqs, input_masks, texts, text_ids = batch_queue.dequeue()

        images_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=images)
        labels_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=labels)
        input_seqs_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=input_seqs)
        target_seqs_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=target_seqs)
        input_masks_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=input_masks)
        texts_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=texts)
        text_ids_splits = tf.split(axis=0, num_or_size_splits=FLAGS.num_gpus, value=text_ids)

        tower_grads = []
        for k in range(FLAGS.num_gpus):
            with tf.device('/gpu:%d' % k):
                with tf.name_scope('tower_%d' % k) as scope:
                    with tf.variable_scope(tf.get_variable_scope()):

                        loss, cmpm_loss, cmpc_loss, i2t_loss, t2i_loss = \
                            _tower_loss(network_fn, images_splits[k], labels_splits[k],
                                        input_seqs_splits[k], input_masks_splits[k])

                        # Reuse variables for the next tower.
                        tf.get_variable_scope().reuse_variables()

                        # Retain the summaries from the final tower.
                        summaries = tf.get_collection(tf.GraphKeys.SUMMARIES, scope)

                        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS, scope=scope)

                        # Variables to train.
                        variables_to_train = get_variables_to_train()
                        grads = optimizer.compute_gradients(loss, var_list=variables_to_train)

                        tower_grads.append(grads)

        # We must calculate the mean of each gradient. Note that this is the
        # synchronization point across all towers.
        grads = _average_gradients(tower_grads)

        # Add a summary to track the learning rate and precision.
        summaries.append(tf.summary.scalar('learning_rate', learning_rate))

        # Add histograms for gradients and trainable variables.
        for grad, var in grads:
            if grad is not None:
                summaries.append(tf.summary.histogram(var.op.name + '/gradients', grad))

        for var in tf.trainable_variables():
            summaries.append(tf.summary.histogram(var.op.name, var))

        #################################
        # Configure the moving averages #
        #################################
        if FLAGS.moving_average_decay:
            moving_average_variables = slim.get_model_variables()
            variable_averages = tf.train.ExponentialMovingAverage(
                FLAGS.moving_average_decay, global_step)
            update_ops.append(variable_averages.apply(moving_average_variables))

        # Apply the gradients to adjust the shared variables.
        grad_updates = optimizer.apply_gradients(grads, global_step=global_step)
        update_ops.append(grad_updates)

        # Group all updates into a single train op.
        train_op = tf.group(*update_ops)

        # Create a saver.
        saver = tf.train.Saver(tf.global_variables())

        # Build the summary operation from the last tower summaries.
        summary_op = tf.summary.merge(summaries)

        # Build an initialization operation to run below.
        init = tf.global_variables_initializer()

        # Start running operations on the Graph. allow_soft_placement must be set to
        # True to build towers on GPU, as some of the ops do not have GPU implementations.
        config = tf.ConfigProto(
            allow_soft_placement=True,
            log_device_placement=FLAGS.log_device_placement)

        sess = tf.Session(config=config)
        sess.run(init)

        ck_global_step = get_init_fn(sess)
        print_train_info()

        # Start the queue runners.
        tf.train.start_queue_runners(sess=sess)

        summary_writer = tf.summary.FileWriter(
            os.path.join(FLAGS.log_dir),
            graph=sess.graph)

        num_steps_per_epoch = int(dataset.num_samples / FLAGS.batch_size)
        max_number_of_steps = FLAGS.num_epochs * num_steps_per_epoch
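        # Drop into the debugger to inspect the restored step count and graph before the training loop runs.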
        breakpoint()
        print("max_number_of_steps is %d", max_number_of_steps)
        for step in range(max_number_of_steps):
            step += int(ck_global_step)
            # check the training data
            # simages, slabels, sinput_seqs, starget_seqs, sinput_masks, stexts, stext_ids = \
            # sess.run([images_splits[0], labels_splits[0], input_seqs_splits[0], target_seqs_splits[0],
            #           input_masks_splits[0], texts_splits[0], text_ids_splits[0]])
            # save_images(simages[:8], [1, 8], './{}/{:05d}.png'.format(FLAGS.train_samples_dir, step))
            # import pdb
            # pdb.set_trace()

            _, total_loss_value, cmpm_loss_value, cmpc_loss_value, i2t_loss_value, t2i_loss_value = \
                sess.run([train_op, loss, cmpm_loss, cmpc_loss, i2t_loss, t2i_loss])
            print(cmpm_loss_value)
            print(cmpc_loss_value)
            print(total_loss_value)
            assert not np.isnan(cmpm_loss_value), 'Model diverged with cmpm_loss = NaN'
            assert not np.isnan(cmpc_loss_value), 'Model diverged with cmpc_loss = NaN'
            assert not np.isnan(total_loss_value), 'Model diverged with total_loss = NaN'

            if step % 10 == 0:
                format_str = ('%s: step %d, cmpm_loss = %.2f, cmpc_loss = %.2f, '
                              'i2t_loss = %.2f, t2i_loss = %.2f')
                print(format_str % (FLAGS.dataset_name, step, cmpm_loss_value, cmpc_loss_value,
                                    i2t_loss_value, t2i_loss_value))

            if step % 100 == 0:
                summary_str = sess.run(summary_op)
                summary_writer.add_summary(summary_str, step)

            # Save the model checkpoint periodically.
            if step % FLAGS.ckpt_steps == 0 or (step + 1) == max_number_of_steps:
                checkpoint_path = os.path.join(FLAGS.checkpoint_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
Code Example #8
class Dog:
    def __init__(self, name, age, housebroke=True):
        self.name = name
        self.age = age
        self.housebroke = housebroke

    def bark(self):
        print(f'{self.name} likes to bark!')


# implementing inheritance between classes
class Beagle(Dog):
    def __init__(self, name, age, housebroke=True, barks_alot=True):
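        # Initialize the shared Dog attributes, then record the Beagle-specific flag.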
        super().__init__(name, age, housebroke)
        self.barks_alot = barks_alot

    # this method has to be defined inside this class so the overriding version runs at runtime
    def bark(self):
        if self.barks_alot:
            print(f'{self.name} likes to bark!')
        else:
            print(f'{self.name} hates to bark!')


if __name__ == "__main__":

    lucky = Dog('lokey', 3)  # if I take out barks_alot, the bark function works as-is,
    spike = Dog('spike', 7)  # but if I leave it in, the overriding bark has to be
    breakpoint()             # defined inside the Beagle class so the right version runs