Example #1
    def readhalo(self, groupid, parttype=1):
        assert parttype in self.parttypes, 'Specified parttype is not available in this run'
        snapdir = self.snapdir
        snapnum = self.snapnum
        cat = self.cat
        centre = cat.GroupPos[groupid]
        if self.useFOF:
            pos = snapshot.loadHalo(snapdir, snapnum, groupid, parttype,
                                    ["Coordinates"])
            if parttype != 1:
                mass = snapshot.loadHalo(snapdir, snapnum, groupid, parttype,
                                         ["Masses"])
        else:  #DEFAULT: not NR and use central subhalo only
            subnum = cat.GroupFirstSub[groupid]
            pos = snapshot.loadSubhalo(snapdir, snapnum, subnum, parttype,
                                       ["Coordinates"])
            if parttype != 1:
                mass = snapshot.loadSubhalo(snapdir, snapnum, subnum, parttype,
                                            ["Masses"])

        npart = len(pos)
        try:
            pos = utils.image(pos - centre, None, self.boxsize)
        except Exception:
            print('readhalo failed:', groupid, pos.__class__, centre)
            return -1, None
        assert npart == len(pos), 'Readhalo error! pos={}'.format(pos)

        if parttype == 1:
            return pos, None
        else:
            return pos, mass
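In Examples #1, #2, #9, and #16, utils.image(pos - centre, None, boxsize) wraps particle offsets back into the periodic simulation box (the same utils.image names unrelated helpers elsewhere in this listing, e.g. a TensorBoard image summary in Examples #10, #11, and #15). A minimal sketch of such a wrapping helper, assuming a cubic periodic box of side boxsize; the name and signature come from the calls above and from the commented-out line in Example #16, but the body is an assumption:

import numpy as np

def image(pos, centre, boxsize):
    # Wrap offsets into [-boxsize/2, boxsize/2) for a periodic cubic box.
    # centre=None means pos already holds offsets relative to the centre.
    if centre is not None:
        pos = pos - centre
    return (pos - 0.5 * boxsize) % boxsize - 0.5 * boxsize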
Example #2
    def readsubhalo(self, subid, parttype=1):
        snapdir = self.snapdir
        snapnum = self.snapnum
        cat = self.cat
        centre = cat.SubhaloPos[subid]
        if self.useFOF:
            raise Exception('readsubhalo is incompatible with option useFOF')
        else:  #use central subhalo only
            pos = snapshot.loadSubhalo(snapdir, snapnum, subid, parttype,
                                       ["Coordinates"])
            if parttype != 1:
                mass = snapshot.loadSubhalo(snapdir, snapnum, subid, parttype,
                                            ["Masses"])

        if isinstance(pos, dict):
            assert pos['count'] == 0
            #print 'No particles of type {} in subhalo {}'.format(parttype,subid)
            return 0, None

        npart = len(pos)
        try:
            pos = utils.image(pos - centre, None, self.boxsize)
        except Exception:
            print('readsubhalo failed:', subid, pos.__class__, centre)
            return -1, None
        #assert npart == len(pos),'Readhalo error! pos={}'.format(pos)

        if parttype == 1:
            return pos, None
        else:
            return pos, mass
Example #3
def main():
    # Create image from image file path
    img = image()
    # Prompt user for starting and target points
    coords = coordinates(img)
    # Find optimal path based on starting and target points
    find_optimal_path(img, coords)
Example #4
        def lire_trans():
            def attendu(lu, ZOU):
                if lu != ZOU:
                    raise Fini_OK

            def lire_zeros():
                try:
                    lu = next(iter)
                except StopIteration:
                    raise Fini
                attendu(lu, '0')
                n = 1
                while True:
                    lu = next(iter)
                    if lu != '0': break
                    n += 1
                attendu(lu, '1')
                return n

            def lire_etat():
                etat = str(lire_zeros())
                self.etats.add(etat)
                return etat

            def lire_symb():
                symb = symbs[lire_zeros()]
                self.gamma.add(symb)
                return symb

            def lire_mouv():
                mouv = mouvs[lire_zeros()]
                return mouv

            try:
                try:
                    source = lire_etat()
                except Fini:
                    raise Fini_OK  ## no more transitions
                lu = lire_symb()
                but = lire_etat()
                ecrit = lire_symb()
                mouv = lire_mouv()


                if source in self.trans:
                    assert (lu not in self.trans[source]),\
                            message("double definition de transition sur " +
                                    image((source, lu)))
                else:
                    self.trans[source] = { }
                self.trans[source][lu] = (but, ecrit, mouv)
            except Fini:  ## in ZOU*, but not valid -> no transition
                self.trans = { }
                raise Fini_OK
Example #5
    def afficher(self):
        from utils import image

        print ("etats : {", seq_ens_trie(self.etats), "}")
        print ("Gamma : {", seq_ens_trie(self.gamma), "}")
        print ("Sigma : {", seq_ens_trie(self.sigma), "}")
        print ("blanc :", self.blanc)
        print ("etat initial :", self.init)

        if  len(self.finals) == 0:
            print("PAS D'ETAT FINAL")
        elif len(self.finals) == 1:
            print("etat final :", list(self.finals)[0])
        else:
            print ("etats finals : {", seq_ens_trie(self.finals), "}")

        if len(self.trans) == 0:
            print("PAS DE TRANSITION")
            return
        print("transitions :")
        for (source, trans) in sorted(self.trans.items()):
            for (lu, triplet) in sorted(trans.items()):
                print ("  " + image ((source, lu)) + " -> " + image (triplet))
Example #6
def resize(image, width=None, height=None):

    print('Resizing ', image)

    img = utils.image(image)
    w, h = img.size

    scale = _compute_scale(w, h, width, height)

    nh, nw = int(h * scale), int(w * scale)
    if scale < 1:
        return img.resize((nw, nh), Image.NEAREST)
    else:
        return img.resize((nw, nh), Image.BICUBIC)
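A hypothetical use of the resize helper above, assuming utils.image opens the path as a PIL.Image and _compute_scale returns the ratio needed to fit the requested bounds (the file names are placeholders):

thumb = resize('photo.jpg', width=256)   # scale < 1 -> Image.NEAREST
poster = resize('icon.png', height=720)  # scale > 1 -> Image.BICUBIC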
Example #7
        def saveLaborHours(self):
            saveDir = r"C:\Users\ALESSANDROAlves\Box\Plan & Build\Governance\Companion Agreements\Brazil\04 - Actuals - Labor"
            fileName = os.path.join(saveDir, utils.getStampedStr("{} - IPPF Actuals Extract.csv", utils.YYmMM_DD))
            utils.automate("IPPF Actual Hours Report", 1, 10)
            if os.path.isfile(fileName):
                os.remove(fileName)  # delete the existing file
            utils.automate("IPPF Actual Hours Report", 11, 11)
            pyautogui.typewrite(fileName)
            time.sleep(2)
            pyautogui.press("enter")

            # Refresh PBReports
            excel.openPBR(pars.pbReportsLocal, utils.image(r"excel\files\PB Reports Brazil.xlsm.PNG"))
            utils.automate("Refresh PowerQueries",10,30)
Example #8
def simule(nom_fichier, mot, trace, pas_a_pas):
    from analyse import lire_mt
    from ruban import Ruban
    from utils import image

    m = lire_mt(nom_fichier)
    for s in mot:
        assert s in m.sigma, "symbole pas dans Sigma : " + s
    r = Ruban(m.blanc, mot)
    q = m.init
    nb_pas = 0
    max_len_etats = max([len(e) for e in m.etats])
    Configuration = "Configuration"
    Dec_Transition = 20
    Dec_trans = (len(Configuration) + Dec_Transition) * ' '
    try:
        if trace:
            print("Configuration" + Dec_Transition * ' ' + "Transition")
        while True:
            if trace:
                r.print_config(q, max_len_etats)
                if pas_a_pas:
                    input()
                else:
                    print()
            etat_symb = (q, r.symb())
            t = m.trans[q][r.symb()]  # -> KeyError if undefined
            nb_pas += 1
            if trace:
                print(Dec_trans + "[" + str(nb_pas) + "]  " +
                      image((q, r.symb())) + "  ->  " + image(t))
            q = t[0]
            r.trans(t[1], t[2])
    except KeyError:
        if trace:
            print(Dec_trans + "pas de transition, execution terminee")
        return (q, q in m.finals, nb_pas, r)
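A hypothetical invocation of simule, assuming machine.mt is a machine description accepted by lire_mt and that '0' and '1' belong to its Sigma:

etat, accepte, nb_pas, ruban = simule('machine.mt', '0011', trace=True, pas_a_pas=False)
print('final state:', etat, '| accepted:', accepte, '| steps:', nb_pas)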
Example #9
    def getSubhaloes(self, group, parttype, minmass):
        s = slice(self.cat.GroupFirstSub[group],
                  self.cat.GroupFirstSub[group] + self.cat.GroupNsubs[group])

        pos = self.cat.SubhaloPos[s]
        mass = self.cat.SubhaloMass[s]

        centre = self.cat.GroupPos[group]
        npart = len(pos)
        try:
            pos = utils.image(pos - centre, None, self.boxsize)
        except Exception:
            print('getSubhaloes failed:', group, pos.__class__, centre)
            return None

        return pos[mass > minmass / 1e10], mass[mass > minmass / 1e10]
Example #10
def _model_fn(features, labels, mode, params, model, variable_filter_fn=None):
    """Model definition entry.

  Args:
    features: the input image tensor with shape [batch_size, height, width, 3].
      The height and width are fixed and equal.
    labels: the input labels in a dictionary. The labels include class targets
      and box targets which are dense label maps. The labels are generated from
      get_input_fn function in data/dataloader.py
    mode: the mode of TPUEstimator including TRAIN, EVAL, and PREDICT.
    params: the dictionary defines hyperparameters of model. The default
      settings are in default_hparams function in this file.
    model: the model outputs class logits and box regression outputs.
    variable_filter_fn: the filter function that takes trainable_variables and
      returns the variable list after applying the filter rule.

  Returns:
    tpu_spec: the TPUEstimatorSpec to run training, evaluation, or prediction.

  Raises:
    RuntimeError: if both ckpt and backbone_ckpt are set.
  """
    utils.image('input_image', features)
    training_hooks = []
    if params['data_format'] == 'channels_first':
        features = tf.transpose(features, [0, 3, 1, 2])

    def _model_outputs(inputs):
        # Convert params (dict) to Config for easier access.
        return model(inputs, config=hparams_config.Config(params))

    cls_outputs, box_outputs = utils.build_model_with_precision(
        params['precision'], _model_outputs, features,
        params['is_training_bn'])

    levels = cls_outputs.keys()
    for level in levels:
        cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
        box_outputs[level] = tf.cast(box_outputs[level], tf.float32)

    # First check if it is in PREDICT mode.
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'image': features,
        }
        for level in levels:
            predictions['cls_outputs_%d' % level] = cls_outputs[level]
            predictions['box_outputs_%d' % level] = box_outputs[level]
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Set up training loss and learning rate.
    update_learning_rate_schedule_parameters(params)
    global_step = tf.train.get_or_create_global_step()
    learning_rate = learning_rate_schedule(params, global_step)

    # cls_loss and box_loss are for logging. only total_loss is optimized.
    det_loss, cls_loss, box_loss, box_iou_loss = detection_loss(
        cls_outputs, box_outputs, labels, params)
    reg_l2loss = reg_l2_loss(params['weight_decay'])
    total_loss = det_loss + reg_l2loss

    if mode == tf.estimator.ModeKeys.TRAIN:
        utils.scalar('lrn_rate', learning_rate)
        utils.scalar('trainloss/cls_loss', cls_loss)
        utils.scalar('trainloss/box_loss', box_loss)
        utils.scalar('trainloss/det_loss', det_loss)
        utils.scalar('trainloss/reg_l2_loss', reg_l2loss)
        utils.scalar('trainloss/loss', total_loss)
        if box_iou_loss:
            utils.scalar('trainloss/box_iou_loss', box_iou_loss)

    moving_average_decay = params['moving_average_decay']
    if moving_average_decay:
        ema = tf.train.ExponentialMovingAverage(decay=moving_average_decay,
                                                num_updates=global_step)
        ema_vars = utils.get_ema_vars()
    if params['strategy'] == 'horovod':
        import horovod.tensorflow as hvd  # pylint: disable=g-import-not-at-top
        learning_rate = learning_rate * hvd.size()
    if mode == tf.estimator.ModeKeys.TRAIN:
        if params['optimizer'].lower() == 'sgd':
            optimizer = tf.train.MomentumOptimizer(learning_rate,
                                                   momentum=params['momentum'])
        elif params['optimizer'].lower() == 'adam':
            optimizer = tf.train.AdamOptimizer(learning_rate)
        else:
            raise ValueError('optimizers should be adam or sgd')

        if params['strategy'] == 'tpu':
            optimizer = tf.tpu.CrossShardOptimizer(optimizer)
        elif params['strategy'] == 'horovod':
            optimizer = hvd.DistributedOptimizer(optimizer)
            training_hooks = [hvd.BroadcastGlobalVariablesHook(0)]

        # Batch norm requires update_ops to be added as a train_op dependency.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        var_list = tf.trainable_variables()
        if variable_filter_fn:
            var_list = variable_filter_fn(var_list)

        if params.get('clip_gradients_norm', 0) > 0:
            logging.info('clip gradients norm by %f',
                         params['clip_gradients_norm'])
            grads_and_vars = optimizer.compute_gradients(total_loss, var_list)
            with tf.name_scope('clip'):
                grads = [gv[0] for gv in grads_and_vars]
                tvars = [gv[1] for gv in grads_and_vars]
                clipped_grads, gnorm = tf.clip_by_global_norm(
                    grads, params['clip_gradients_norm'])
                utils.scalar('gnorm', gnorm)
                grads_and_vars = list(zip(clipped_grads, tvars))

            with tf.control_dependencies(update_ops):
                train_op = optimizer.apply_gradients(grads_and_vars,
                                                     global_step)
        else:
            with tf.control_dependencies(update_ops):
                train_op = optimizer.minimize(total_loss,
                                              global_step,
                                              var_list=var_list)

        if moving_average_decay:
            with tf.control_dependencies([train_op]):
                train_op = ema.apply(ema_vars)

    else:
        train_op = None

    eval_metrics = None
    if mode == tf.estimator.ModeKeys.EVAL:

        def metric_fn(**kwargs):
            """Returns a dictionary that has the evaluation metrics."""
            batch_size = params['batch_size']
            if params['strategy'] == 'tpu':
                batch_size = params['batch_size'] * params['num_shards']
            eval_anchors = anchors.Anchors(params['min_level'],
                                           params['max_level'],
                                           params['num_scales'],
                                           params['aspect_ratios'],
                                           params['anchor_scale'],
                                           params['image_size'])
            anchor_labeler = anchors.AnchorLabeler(eval_anchors,
                                                   params['num_classes'])
            cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
            box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])

            if params.get('testdev_dir', None):
                logging.info('Eval testdev_dir %s', params['testdev_dir'])
                coco_metrics = coco_metric_fn(
                    batch_size,
                    anchor_labeler,
                    params['val_json_file'],
                    testdev_dir=params['testdev_dir'],
                    disable_pyfun=params.get('disable_pyfun', None),
                    **kwargs)
            else:
                logging.info('Eval val with groundtruths %s.',
                             params['val_json_file'])
                coco_metrics = coco_metric_fn(batch_size, anchor_labeler,
                                              params['val_json_file'],
                                              **kwargs)

            # Add metrics to output.
            output_metrics = {
                'cls_loss': cls_loss,
                'box_loss': box_loss,
            }
            output_metrics.update(coco_metrics)
            return output_metrics

        cls_loss_repeat = tf.reshape(
            tf.tile(tf.expand_dims(cls_loss, 0), [
                params['batch_size'],
            ]), [params['batch_size'], 1])
        box_loss_repeat = tf.reshape(
            tf.tile(tf.expand_dims(box_loss, 0), [
                params['batch_size'],
            ]), [params['batch_size'], 1])
        metric_fn_inputs = {
            'cls_loss_repeat': cls_loss_repeat,
            'box_loss_repeat': box_loss_repeat,
            'source_ids': labels['source_ids'],
            'groundtruth_data': labels['groundtruth_data'],
            'image_scales': labels['image_scales'],
        }
        add_metric_fn_inputs(params, cls_outputs, box_outputs,
                             metric_fn_inputs)
        eval_metrics = (metric_fn, metric_fn_inputs)

    checkpoint = params.get('ckpt') or params.get('backbone_ckpt')

    if checkpoint and mode == tf.estimator.ModeKeys.TRAIN:
        # Initialize the model from an EfficientDet or backbone checkpoint.
        if params.get('ckpt') and params.get('backbone_ckpt'):
            raise RuntimeError(
                '--backbone_ckpt and --checkpoint are mutually exclusive')

        if params.get('backbone_ckpt'):
            var_scope = params['backbone_name'] + '/'
            if params['ckpt_var_scope'] is None:
                # Use backbone name as default checkpoint scope.
                ckpt_scope = params['backbone_name'] + '/'
            else:
                ckpt_scope = params['ckpt_var_scope'] + '/'
        else:
            # Load every var in the given checkpoint
            var_scope = ckpt_scope = '/'

        def scaffold_fn():
            """Loads pretrained model through scaffold function."""
            logging.info('restore variables from %s', checkpoint)

            var_map = utils.get_ckpt_var_map(ckpt_path=checkpoint,
                                             ckpt_scope=ckpt_scope,
                                             var_scope=var_scope,
                                             var_exclude_expr=params.get(
                                                 'var_exclude_expr', None))

            tf.train.init_from_checkpoint(checkpoint, var_map)

            return tf.train.Scaffold()
    elif mode == tf.estimator.ModeKeys.EVAL and moving_average_decay:

        def scaffold_fn():
            """Load moving average variables for eval."""
            logging.info('Load EMA vars with ema_decay=%f',
                         moving_average_decay)
            restore_vars_dict = ema.variables_to_restore(ema_vars)
            saver = tf.train.Saver(restore_vars_dict)
            return tf.train.Scaffold(saver=saver)
    else:
        scaffold_fn = None

    if params['strategy'] != 'tpu':
        # Profile every 1K steps.
        profile_hook = tf.train.ProfilerHook(save_steps=1000,
                                             output_dir=params['model_dir'])
        training_hooks.append(profile_hook)

        # Report memory allocation if OOM
        class OomReportingHook(tf.estimator.SessionRunHook):
            def before_run(self, run_context):
                return tf.estimator.SessionRunArgs(
                    fetches=[],
                    options=tf.RunOptions(
                        report_tensor_allocations_upon_oom=True))

        training_hooks.append(OomReportingHook())

    return tf.estimator.tpu.TPUEstimatorSpec(mode=mode,
                                             loss=total_loss,
                                             train_op=train_op,
                                             eval_metrics=eval_metrics,
                                             host_call=utils.get_tpu_host_call(
                                                 global_step, params),
                                             scaffold_fn=scaffold_fn,
                                             training_hooks=training_hooks)
Example #11
def _model_fn(features, labels, mode, params, model, variable_filter_fn=None):
    """Model definition entry.

  Args:
    features: the input image tensor with shape [batch_size, height, width, 3].
      The height and width are fixed and equal.
    labels: the input labels in a dictionary. The labels include class targets
      and box targets which are dense label maps. The labels are generated from
      get_input_fn function in data/dataloader.py
    mode: the mode of TPUEstimator including TRAIN and EVAL.
    params: the dictionary defines hyperparameters of model. The default
      settings are in default_hparams function in this file.
    model: the model outputs class logits and box regression outputs.
    variable_filter_fn: the filter function that takes trainable_variables and
      returns the variable list after applying the filter rule.

  Returns:
    tpu_spec: the TPUEstimatorSpec to run training, evaluation, or prediction.

  Raises:
    RuntimeError: if both ckpt and backbone_ckpt are set.
  """
    is_tpu = params['strategy'] == 'tpu'
    if params['img_summary_steps']:
        utils.image('input_image', features, is_tpu)
    training_hooks = []
    params['is_training_bn'] = (mode == tf.estimator.ModeKeys.TRAIN)

    if params['use_keras_model']:

        def model_fn(inputs):
            model = efficientdet_keras.EfficientDetNet(
                config=hparams_config.Config(params))
            cls_out_list, box_out_list = model(inputs,
                                               params['is_training_bn'])
            cls_outputs, box_outputs = {}, {}
            for i in range(params['min_level'], params['max_level'] + 1):
                cls_outputs[i] = cls_out_list[i - params['min_level']]
                box_outputs[i] = box_out_list[i - params['min_level']]
            return cls_outputs, box_outputs
    else:
        model_fn = functools.partial(model,
                                     config=hparams_config.Config(params))

    precision = utils.get_precision(params['strategy'],
                                    params['mixed_precision'])
    cls_outputs, box_outputs = utils.build_model_with_precision(
        precision, model_fn, features, params['is_training_bn'])

    levels = cls_outputs.keys()
    for level in levels:
        cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
        box_outputs[level] = tf.cast(box_outputs[level], tf.float32)

    # Set up training loss and learning rate.
    update_learning_rate_schedule_parameters(params)
    global_step = tf.train.get_or_create_global_step()
    learning_rate = learning_rate_schedule(params, global_step)

    # cls_loss and box_loss are for logging. only total_loss is optimized.
    det_loss, cls_loss, box_loss = detection_loss(cls_outputs, box_outputs,
                                                  labels, params)
    reg_l2loss = reg_l2_loss(params['weight_decay'])
    total_loss = det_loss + reg_l2loss

    if mode == tf.estimator.ModeKeys.TRAIN:
        utils.scalar('lrn_rate', learning_rate, is_tpu)
        utils.scalar('trainloss/cls_loss', cls_loss, is_tpu)
        utils.scalar('trainloss/box_loss', box_loss, is_tpu)
        utils.scalar('trainloss/det_loss', det_loss, is_tpu)
        utils.scalar('trainloss/reg_l2_loss', reg_l2loss, is_tpu)
        utils.scalar('trainloss/loss', total_loss, is_tpu)
        train_epochs = tf.cast(global_step,
                               tf.float32) / params['steps_per_epoch']
        utils.scalar('train_epochs', train_epochs, is_tpu)

    moving_average_decay = params['moving_average_decay']
    if moving_average_decay:
        ema = tf.train.ExponentialMovingAverage(decay=moving_average_decay,
                                                num_updates=global_step)
        ema_vars = utils.get_ema_vars()

    if mode == tf.estimator.ModeKeys.TRAIN:
        if params['optimizer'].lower() == 'sgd':
            optimizer = tf.train.MomentumOptimizer(learning_rate,
                                                   momentum=params['momentum'])
        elif params['optimizer'].lower() == 'adam':
            optimizer = tf.train.AdamOptimizer(learning_rate)
        else:
            raise ValueError('optimizers should be adam or sgd')

        if is_tpu:
            optimizer = tf.tpu.CrossShardOptimizer(optimizer)

        # Batch norm requires update_ops to be added as a train_op dependency.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        var_list = tf.trainable_variables()
        if variable_filter_fn:
            var_list = variable_filter_fn(var_list)

        if params.get('clip_gradients_norm', None):
            logging.info('clip gradients norm by %f',
                         params['clip_gradients_norm'])
            grads_and_vars = optimizer.compute_gradients(total_loss, var_list)
            with tf.name_scope('clip'):
                grads = [gv[0] for gv in grads_and_vars]
                tvars = [gv[1] for gv in grads_and_vars]
                # First clip each variable's norm, then clip global norm.
                clip_norm = abs(params['clip_gradients_norm'])
                clipped_grads = [
                    tf.clip_by_norm(g, clip_norm) if g is not None else None
                    for g in grads
                ]
                clipped_grads, _ = tf.clip_by_global_norm(
                    clipped_grads, clip_norm)
                utils.scalar('gradient_norm',
                             tf.linalg.global_norm(clipped_grads), is_tpu)
                grads_and_vars = list(zip(clipped_grads, tvars))

            with tf.control_dependencies(update_ops):
                train_op = optimizer.apply_gradients(grads_and_vars,
                                                     global_step)
        else:
            with tf.control_dependencies(update_ops):
                train_op = optimizer.minimize(total_loss,
                                              global_step,
                                              var_list=var_list)

        if moving_average_decay:
            with tf.control_dependencies([train_op]):
                train_op = ema.apply(ema_vars)

    else:
        train_op = None

    eval_metrics = None
    if mode == tf.estimator.ModeKeys.EVAL:

        def metric_fn(**kwargs):
            """Returns a dictionary that has the evaluation metrics."""
            if params['nms_configs'].get('pyfunc', True):
                detections_bs = []
                nms_configs = params['nms_configs']
                for index in range(kwargs['boxes'].shape[0]):
                    detections = tf.numpy_function(
                        functools.partial(nms_np.per_class_nms,
                                          nms_configs=nms_configs),
                        [
                            kwargs['boxes'][index],
                            kwargs['scores'][index],
                            kwargs['classes'][index],
                            tf.slice(kwargs['image_ids'], [index], [1]),
                            tf.slice(kwargs['image_scales'], [index], [1]),
                            params['num_classes'],
                            nms_configs['max_output_size'],
                        ], tf.float32)
                    detections_bs.append(detections)
                detections_bs = postprocess.transform_detections(
                    tf.stack(detections_bs))
            else:
                # These two branches should be equivalent, but currently they are not.
                # TODO(tanmingxing): enable the non_pyfun path after bug fix.
                nms_boxes, nms_scores, nms_classes, _ = postprocess.per_class_nms(
                    params, kwargs['boxes'], kwargs['scores'],
                    kwargs['classes'], kwargs['image_scales'])
                img_ids = tf.cast(tf.expand_dims(kwargs['image_ids'], -1),
                                  nms_scores.dtype)
                detections_bs = [
                    img_ids * tf.ones_like(nms_scores),
                    nms_boxes[:, :, 1],
                    nms_boxes[:, :, 0],
                    nms_boxes[:, :, 3] - nms_boxes[:, :, 1],
                    nms_boxes[:, :, 2] - nms_boxes[:, :, 0],
                    nms_scores,
                    nms_classes,
                ]
                detections_bs = tf.stack(detections_bs,
                                         axis=-1,
                                         name='detections')

            if params.get('testdev_dir', None):
                logging.info('Eval testdev_dir %s', params['testdev_dir'])
                eval_metric = coco_metric.EvaluationMetric(
                    testdev_dir=params['testdev_dir'])
                coco_metrics = eval_metric.estimator_metric_fn(
                    detections_bs, tf.zeros([1]))
            else:
                logging.info('Eval val with groundtruths %s.',
                             params['val_json_file'])
                eval_metric = coco_metric.EvaluationMetric(
                    filename=params['val_json_file'],
                    label_map=params['label_map'])
                coco_metrics = eval_metric.estimator_metric_fn(
                    detections_bs, kwargs['groundtruth_data'])

            # Add metrics to output.
            cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
            box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
            output_metrics = {
                'cls_loss': cls_loss,
                'box_loss': box_loss,
            }
            output_metrics.update(coco_metrics)
            return output_metrics

        cls_loss_repeat = tf.reshape(
            tf.tile(tf.expand_dims(cls_loss, 0), [
                params['batch_size'],
            ]), [params['batch_size'], 1])
        box_loss_repeat = tf.reshape(
            tf.tile(tf.expand_dims(box_loss, 0), [
                params['batch_size'],
            ]), [params['batch_size'], 1])

        cls_outputs = postprocess.to_list(cls_outputs)
        box_outputs = postprocess.to_list(box_outputs)
        params['nms_configs']['max_nms_inputs'] = anchors.MAX_DETECTION_POINTS
        boxes, scores, classes = postprocess.pre_nms(params, cls_outputs,
                                                     box_outputs)
        metric_fn_inputs = {
            'cls_loss_repeat': cls_loss_repeat,
            'box_loss_repeat': box_loss_repeat,
            'image_ids': labels['source_ids'],
            'groundtruth_data': labels['groundtruth_data'],
            'image_scales': labels['image_scales'],
            'boxes': boxes,
            'scores': scores,
            'classes': classes,
        }
        eval_metrics = (metric_fn, metric_fn_inputs)

    checkpoint = params.get('ckpt') or params.get('backbone_ckpt')

    if checkpoint and mode == tf.estimator.ModeKeys.TRAIN:
        # Initialize the model from an EfficientDet or backbone checkpoint.
        if params.get('ckpt') and params.get('backbone_ckpt'):
            raise RuntimeError(
                '--backbone_ckpt and --checkpoint are mutually exclusive')

        if params.get('backbone_ckpt'):
            var_scope = params['backbone_name'] + '/'
            if params['ckpt_var_scope'] is None:
                # Use backbone name as default checkpoint scope.
                ckpt_scope = params['backbone_name'] + '/'
            else:
                ckpt_scope = params['ckpt_var_scope'] + '/'
        else:
            # Load every var in the given checkpoint
            var_scope = ckpt_scope = '/'

        def scaffold_fn():
            """Loads pretrained model through scaffold function."""
            logging.info('restore variables from %s', checkpoint)

            var_map = utils.get_ckpt_var_map(
                ckpt_path=checkpoint,
                ckpt_scope=ckpt_scope,
                var_scope=var_scope,
                skip_mismatch=params['skip_mismatch'])

            tf.train.init_from_checkpoint(checkpoint, var_map)
            return tf.train.Scaffold()
    elif mode == tf.estimator.ModeKeys.EVAL and moving_average_decay:

        def scaffold_fn():
            """Load moving average variables for eval."""
            logging.info('Load EMA vars with ema_decay=%f',
                         moving_average_decay)
            restore_vars_dict = ema.variables_to_restore(ema_vars)
            saver = tf.train.Saver(restore_vars_dict)
            return tf.train.Scaffold(saver=saver)
    else:
        scaffold_fn = None

    if is_tpu:
        return tf.estimator.tpu.TPUEstimatorSpec(
            mode=mode,
            loss=total_loss,
            train_op=train_op,
            eval_metrics=eval_metrics,
            host_call=utils.get_tpu_host_call(global_step, params),
            scaffold_fn=scaffold_fn,
            training_hooks=training_hooks)
    else:
        # Profile every 1K steps.
        if params.get('profile', False):
            profile_hook = tf.estimator.ProfilerHook(
                save_steps=1000,
                output_dir=params['model_dir'],
                show_memory=True)
            training_hooks.append(profile_hook)

            # Report memory allocation if OOM; it will slow down the running.
            class OomReportingHook(tf.estimator.SessionRunHook):
                def before_run(self, run_context):
                    return tf.estimator.SessionRunArgs(
                        fetches=[],
                        options=tf.RunOptions(
                            report_tensor_allocations_upon_oom=True))

            training_hooks.append(OomReportingHook())

        logging_hook = tf.estimator.LoggingTensorHook(
            {
                'step': global_step,
                'det_loss': det_loss,
                'cls_loss': cls_loss,
                'box_loss': box_loss,
            },
            every_n_iter=params.get('iterations_per_loop', 100),
        )
        training_hooks.append(logging_hook)

        eval_metric_ops = (eval_metrics[0](
            **eval_metrics[1]) if eval_metrics else None)
        return tf.estimator.EstimatorSpec(
            mode=mode,
            loss=total_loss,
            train_op=train_op,
            eval_metric_ops=eval_metric_ops,
            scaffold=scaffold_fn() if scaffold_fn else None,
            training_hooks=training_hooks)
Example #12
#!/usr/bin/env python
from utils import (load, take, show, bgr, image, like, bounds,
                   channels, crop, scale, color, avail, colorPicker)
from proto import alias, sharpen, group, find, edge, center, distance
from PIL import Image

print "# fast stuff"
img = load('samples/abstract/colors.png')
#b = take()
show(img)
b, g, r = bgr(img)
img = image(b,b,b)
test = like(img)
bound = bounds(b)
channel = channels(b)
coord = (0,0,50,50)
closer = crop(img, coord)
bigger = scale(closer, 2.0)
eyedrop = color(img, 0, 30)
pallet = avail(img)
colorPicker(img,0,30)

print "# slow stuff"
res1 = alias(img, .3)
res2 = sharpen(img, .3)
blob1 = group(img)
mask = Image.new("RGB", (50, 10), "white")
blob3 = find(img,mask,(3,3))
coords1 = edge(img)
coords2 = center(blob1)
dist = distance(0,3)
Example #13
def lire_mt(nom_fichier):
    def les_cars(nom_fichier):
        with open(nom_fichier, "r") as fichier:
            for ligne in fichier:
                for car in ligne:
                    yield car

    cars = les_cars(nom_fichier)
    car = None
    lig, col = 1, 0

    def avancer():
        nonlocal car, lig, col
        car = next(cars)  ## StopIteration at end of file
        if car == '\n':
            lig += 1
            col = 0
        else:
            col += 1

    class FiniOK(Exception):
        pass

    class Mot():
        def __init__(self):
            self.m = ""

    le_mot = Mot()

    def message(m):
        return "l." + str(lig) + " c." + str(col) + " : " + m

    mt = MT()

    def is_sep():
        return car == ' ' or car == '\t' or car == '\n'

    def is_comm():
        return car == '/'

    def is_sep_comm():
        return is_sep() or is_comm()

    def pass_seps():
        while is_sep():
            avancer()

    def verif_seps():
        assert is_sep(), message("separateur attendu")
        avancer()
        pass_seps()

    def verif_sep_comm():
        if (is_comm()):
            avancer()
            assert car == '/', message("commentaire attendu")
            avancer()
            while car != '\n':
                avancer()
        avancer()

    def pass_seps_comms():
        while is_sep_comm():
            verif_sep_comm()

    def verif_seps_comms():
        assert is_sep_comm(), message("separateur ou commentaire attendu")
        verif_sep_comm()
        pass_seps_comms()

    def lire_symb():
        assert not is_sep(), message("symbole attendu")
        le_mot.m = car
        avancer()

    def lire_sigma():
        lire_symb()
        symb = le_mot.m
        assert symb != mt.blanc, message("blanc (" + mt.blanc +
                                         ") interdit dans Sigma")
        assert symb not in mt.sigma, message("symbole (" + symb +
                                             ") deux fois dans Sigma")
        mt.sigma.add(symb)
        mt.gamma.add(symb)

    def is_mouv():
        return car == 'G' or car == 'D' or car == 'S'

    def lire_mouv():
        assert is_mouv(), message("mouvement (G, D, S) attendu")
        le_mot.m = car
        avancer()

    def lire_etat():
        assert car.isalnum(), message("nom d'etat attendu")
        le_mot.m = car
        try:
            avancer()
            while car.isalnum():
                le_mot.m += car
                avancer()
        finally:
            mt.etats.add(le_mot.m)

    def verif(att):
        assert car == att, message(att + " attendu")
        avancer()

    def etats_finals_eventuels():
        if (car != '$'):  # no final state
            return

        avancer()  # skip '$'

        pass_seps_comms()

        while True:
            try:
                lire_etat()
                pass_seps_comms()
            except StopIteration:
                raise FiniOK
            finally:
                final = le_mot.m
                assert final not in mt.finals,\
                       message("double definition d'etat final (" +
                               final + ")")
                mt.finals.add(final)

            if (car != ','):
                return

            avancer()  # skip ','
            pass_seps_comms()

    try:
        avancer()

        # ['#' symb]
        # -> optional blank symbol (symb, 'B' by default)
        pass_seps_comms()
        if (car == '#'):
            avancer()
            verif_seps()
            lire_symb()
            mt.blanc = le_mot.m
        else:
            mt.blanc = 'B'
        mt.gamma.add(mt.blanc)

        # '&' symb (',' symb)*
        # -> symbols of Sigma, mandatory
        pass_seps_comms()
        verif('&')
        verif_seps()
        while True:
            lire_sigma()
            pass_seps_comms()
            if (car != ','): break
            avancer()
            pass_seps_comms()

        # '@' etat
        # -> initial state, mandatory
        verif('@')
        pass_seps()
        try:
            lire_etat()
            pass_seps_comms()
        except StopIteration:
            raise FiniOK
        finally:
            mt.init = le_mot.m

        # ['$' etats_finals]
        # -> final states, possibly none
        etats_finals_eventuels()

        ### At this point we have exited OK if finished (no transitions, see [1])

        # (transition)*
        # -> transitions, possibly none
        #                       IN FACT AT LEAST ONE, SEE [1]

        while True:
            ### A transition is REQUIRED here

            # source lu : but ecrit mouv  (state read : target written move)
            #    <=> delta(source, lu) = (but, ecrit, mouv)

            lire_etat()
            source = le_mot.m
            verif_seps()

            lire_symb()
            lu = le_mot.m
            mt.gamma.add(lu)
            pass_seps()

            verif(':')

            if source in mt.trans:
                assert (lu not in mt.trans[source]),\
                        message("double definition de transition sur " +
                                image((source, lu)))
            else:
                mt.trans[source] = {}

            pass_seps_comms()

            lire_etat()
            but = le_mot.m
            verif_seps()

            lire_symb()
            ecrit = le_mot.m
            mt.gamma.add(ecrit)
            verif_seps()

            try:
                lire_mouv()
                verif_seps_comms()
            except StopIteration:
                raise FiniOK  ### the only way to exit the loop OK
            finally:
                mouv = le_mot.m
                mt.trans[source][lu] = (but, ecrit, mouv)

    except FiniOK:
        return mt

    except StopIteration:
        print("ERREUR : " + "fin de fichier inattendue", file=sys.stderr)
        sys.exit(1)

    except AssertionError as err:
        print("ERREUR : " + err.args[0], file=sys.stderr)
        sys.exit(1)
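A hypothetical round trip over this parser, assuming the afficher method from Example #5 belongs to the same MT class (machine.mt is a placeholder file name):

mt = lire_mt('machine.mt')
mt.afficher()  # dumps states, Gamma, Sigma, blank, initial/final states, transitions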
Example #14

        title='Test Set Visualization',
        xlabel='Latent dim 1',
        ylabel='Latent dim 2')

print('\nCorresponding mapping coordinates in the latent space - one image per digit:\n')
encoded_x_test = encoder.predict(x_test)
digits = {}
i = 0
while len(digits) < 10:
    digits[y_test[i]] = encoded_x_test[i]
    i += 1

ut.output('g.c', pd.DataFrame(digits))

# (d)
z_sample = np.array([[-2.5, 0.55]])
decoded_x = generator.predict(z_sample)
ut.image(decoded_x)
ut.plt_save('g.d')

# (e)
source, target = digits[3], digits[5]
(x1, y1), (x2, y2) = source, target
a = (y2 - y1) / (x2 - x1)
b = y1 - (a * x1)
f = lambda _x_: a * _x_ + b
x_samples = np.linspace(x1, x2, num=10)
samples = [np.array([[_x, f(_x)]]) for _x in x_samples]
ut.images(map(generator.predict, samples), map(str, x_samples), n_cols=3)
ut.plt_save('g.e')
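The interpolation above parameterises the segment between the two encodings through the line y = a*x + b, which breaks down when x1 == x2; a hedged alternative with the same endpoints is to interpolate both latent coordinates directly:

alphas = np.linspace(0.0, 1.0, num=10)
samples = [np.array([source + t * (target - source)]) for t in alphas]
ut.images(map(generator.predict, samples), map('{:.2f}'.format, alphas), n_cols=3)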
Example #15

def _model_fn(features, labels, mode, params, model, variable_filter_fn=None):
    """Model definition entry.

  Args:
    features: the input image tensor with shape [batch_size, height, width, 3].
      The height and width are fixed and equal.
    labels: the input labels in a dictionary. The labels include class targets
      and box targets which are dense label maps. The labels are generated from
      get_input_fn function in data/dataloader.py
    mode: the mode of TPUEstimator including TRAIN, EVAL, and PREDICT.
    params: the dictionary defines hyperparameters of model. The default
      settings are in default_hparams function in this file.
    model: the model outputs class logits and box regression outputs.
    variable_filter_fn: the filter function that takes trainable_variables and
      returns the variable list after applying the filter rule.

  Returns:
    tpu_spec: the TPUEstimatorSpec to run training, evaluation, or prediction.

  Raises:
    RuntimeError: if both ckpt and backbone_ckpt are set.
  """
    utils.image('input_image', features)
    training_hooks = []
    params['is_training_bn'] = (mode == tf.estimator.ModeKeys.TRAIN)

    if params['use_keras_model']:

        def model_fn(inputs):
            model = efficientdet_keras.EfficientDetNet(
                config=hparams_config.Config(params))
            cls_out_list, box_out_list = model(inputs,
                                               params['is_training_bn'])
            cls_outputs, box_outputs = {}, {}
            for i in range(params['min_level'], params['max_level'] + 1):
                cls_outputs[i] = cls_out_list[i - params['min_level']]
                box_outputs[i] = box_out_list[i - params['min_level']]
            return cls_outputs, box_outputs
    else:
        model_fn = functools.partial(model,
                                     config=hparams_config.Config(params))

    precision = utils.get_precision(params['strategy'],
                                    params['mixed_precision'])
    cls_outputs, box_outputs = utils.build_model_with_precision(
        precision, model_fn, features, params['is_training_bn'])

    levels = cls_outputs.keys()
    for level in levels:
        cls_outputs[level] = tf.cast(cls_outputs[level], tf.float32)
        box_outputs[level] = tf.cast(box_outputs[level], tf.float32)

    # First check if it is in PREDICT mode.
    if mode == tf.estimator.ModeKeys.PREDICT:
        predictions = {
            'image': features,
        }
        for level in levels:
            predictions['cls_outputs_%d' % level] = cls_outputs[level]
            predictions['box_outputs_%d' % level] = box_outputs[level]
        return tf.estimator.EstimatorSpec(mode=mode, predictions=predictions)

    # Set up training loss and learning rate.
    update_learning_rate_schedule_parameters(params)
    global_step = tf.train.get_or_create_global_step()
    learning_rate = learning_rate_schedule(params, global_step)

    # cls_loss and box_loss are for logging. only total_loss is optimized.
    det_loss, cls_loss, box_loss, box_iou_loss = detection_loss(
        cls_outputs, box_outputs, labels, params)
    reg_l2loss = reg_l2_loss(params['weight_decay'])
    total_loss = det_loss + reg_l2loss

    if mode == tf.estimator.ModeKeys.TRAIN:
        utils.scalar('lrn_rate', learning_rate)
        utils.scalar('trainloss/cls_loss', cls_loss)
        utils.scalar('trainloss/box_loss', box_loss)
        utils.scalar('trainloss/det_loss', det_loss)
        utils.scalar('trainloss/reg_l2_loss', reg_l2loss)
        utils.scalar('trainloss/loss', total_loss)
        if params['iou_loss_type']:
            utils.scalar('trainloss/box_iou_loss', box_iou_loss)
        train_epochs = tf.cast(global_step,
                               tf.float32) / params['steps_per_epoch']
        utils.scalar('train_epochs', train_epochs)

    moving_average_decay = params['moving_average_decay']
    if moving_average_decay:
        ema = tf.train.ExponentialMovingAverage(decay=moving_average_decay,
                                                num_updates=global_step)
        ema_vars = utils.get_ema_vars()

    if mode == tf.estimator.ModeKeys.TRAIN:
        if params['optimizer'].lower() == 'sgd':
            optimizer = tf.train.MomentumOptimizer(learning_rate,
                                                   momentum=params['momentum'])
        elif params['optimizer'].lower() == 'adam':
            optimizer = tf.train.AdamOptimizer(learning_rate)
        else:
            raise ValueError('optimizers should be adam or sgd')

        if params['strategy'] == 'tpu':
            optimizer = tf.tpu.CrossShardOptimizer(optimizer)
        if params['gradient_checkpointing']:
            from third_party.grad_checkpoint \
                import memory_saving_gradients  # pylint: disable=g-import-not-at-top
            from tensorflow.python.ops \
                import gradients  # pylint: disable=g-import-not-at-top

            # monkey patch tf.gradients to point to our custom version,
            # with automatic checkpoint selection
            def gradients_(ys, xs, grad_ys=None, **kwargs):
                return memory_saving_gradients.gradients(
                    ys,
                    xs,
                    grad_ys,
                    checkpoints=params['gradient_checkpointing_list'],
                    **kwargs)

            gradients.__dict__["gradients"] = gradients_

        # Batch norm requires update_ops to be added as a train_op dependency.
        update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
        var_list = tf.trainable_variables()
        if variable_filter_fn:
            var_list = variable_filter_fn(var_list)

        if params.get('clip_gradients_norm', None):
            logging.info('clip gradients norm by %f',
                         params['clip_gradients_norm'])
            grads_and_vars = optimizer.compute_gradients(total_loss, var_list)
            with tf.name_scope('clip'):
                grads = [gv[0] for gv in grads_and_vars]
                tvars = [gv[1] for gv in grads_and_vars]
                # First clip each variable's norm, then clip global norm.
                clip_norm = abs(params['clip_gradients_norm'])
                clipped_grads = [tf.clip_by_norm(g, clip_norm) for g in grads]
                clipped_grads, _ = tf.clip_by_global_norm(
                    clipped_grads, clip_norm)
                utils.scalar('gradient_norm',
                             tf.linalg.global_norm(clipped_grads))
                grads_and_vars = list(zip(clipped_grads, tvars))

            with tf.control_dependencies(update_ops):
                train_op = optimizer.apply_gradients(grads_and_vars,
                                                     global_step)
        else:
            with tf.control_dependencies(update_ops):
                train_op = optimizer.minimize(total_loss,
                                              global_step,
                                              var_list=var_list)

        if moving_average_decay:
            with tf.control_dependencies([train_op]):
                train_op = ema.apply(ema_vars)

    else:
        train_op = None

    eval_metrics = None
    if mode == tf.estimator.ModeKeys.EVAL:

        def metric_fn(**kwargs):
            """Returns a dictionary that has the evaluation metrics."""
            if params['nms_configs'].get('pyfunc', True):
                detections_bs = []
                for index in range(kwargs['boxes'].shape[0]):
                    nms_configs = params['nms_configs']
                    detections = tf.numpy_function(
                        functools.partial(nms_np.per_class_nms,
                                          nms_configs=nms_configs),
                        [
                            kwargs['boxes'][index],
                            kwargs['scores'][index],
                            kwargs['classes'][index],
                            tf.slice(kwargs['image_ids'], [index], [1]),
                            tf.slice(kwargs['image_scales'], [index], [1]),
                            params['num_classes'],
                            nms_configs['max_output_size'],
                        ], tf.float32)
                    detections_bs.append(detections)
                detections_bs = postprocess.transform_detections(
                    tf.stack(detections_bs))
            else:
                # These two branches should be equivalent, but currently they are not.
                # TODO(tanmingxing): enable the non_pyfun path after bug fix.
                nms_boxes, nms_scores, nms_classes, _ = postprocess.per_class_nms(
                    params, kwargs['boxes'], kwargs['scores'],
                    kwargs['classes'], kwargs['image_scales'])
                img_ids = tf.cast(tf.expand_dims(kwargs['image_ids'], -1),
                                  nms_scores.dtype)
                detections_bs = [
                    img_ids * tf.ones_like(nms_scores),
                    nms_boxes[:, :, 1],
                    nms_boxes[:, :, 0],
                    nms_boxes[:, :, 3] - nms_boxes[:, :, 1],
                    nms_boxes[:, :, 2] - nms_boxes[:, :, 0],
                    nms_scores,
                    nms_classes,
                ]
                detections_bs = tf.stack(detections_bs,
                                         axis=-1,
                                         name='detections')

            if params.get('testdev_dir', None):
                logging.info('Eval testdev_dir %s', params['testdev_dir'])
                eval_metric = coco_metric.EvaluationMetric(
                    testdev_dir=params['testdev_dir'])
                coco_metrics = eval_metric.estimator_metric_fn(
                    detections_bs, tf.zeros([1]))
            else:
                logging.info('Eval val with groundtruths %s.',
                             params['val_json_file'])
                eval_metric = coco_metric.EvaluationMetric(
                    filename=params['val_json_file'])
                coco_metrics = eval_metric.estimator_metric_fn(
                    detections_bs, kwargs['groundtruth_data'],
                    params['label_map'])

            # Add metrics to output.
            cls_loss = tf.metrics.mean(kwargs['cls_loss_repeat'])
            box_loss = tf.metrics.mean(kwargs['box_loss_repeat'])
            output_metrics = {
                'cls_loss': cls_loss,
                'box_loss': box_loss,
            }
            output_metrics.update(coco_metrics)
            return output_metrics

        cls_loss_repeat = tf.reshape(
            tf.tile(tf.expand_dims(cls_loss, 0), [
                params['batch_size'],
            ]), [params['batch_size'], 1])
        box_loss_repeat = tf.reshape(
            tf.tile(tf.expand_dims(box_loss, 0), [
                params['batch_size'],
            ]), [params['batch_size'], 1])

        cls_outputs = postprocess.to_list(cls_outputs)
        box_outputs = postprocess.to_list(box_outputs)
        params['nms_configs']['max_nms_inputs'] = anchors.MAX_DETECTION_POINTS
        boxes, scores, classes = postprocess.pre_nms(params, cls_outputs,
                                                     box_outputs)
        metric_fn_inputs = {
            'cls_loss_repeat': cls_loss_repeat,
            'box_loss_repeat': box_loss_repeat,
            'image_ids': labels['source_ids'],
            'groundtruth_data': labels['groundtruth_data'],
            'image_scales': labels['image_scales'],
            'boxes': boxes,
            'scores': scores,
            'classes': classes,
        }
        eval_metrics = (metric_fn, metric_fn_inputs)

    checkpoint = params.get('ckpt') or params.get('backbone_ckpt')

    if checkpoint and mode == tf.estimator.ModeKeys.TRAIN:
        # Initialize the model from an EfficientDet or backbone checkpoint.
        if params.get('ckpt') and params.get('backbone_ckpt'):
            raise RuntimeError(
                '--backbone_ckpt and --checkpoint are mutually exclusive')

        if params.get('backbone_ckpt'):
            var_scope = params['backbone_name'] + '/'
            if params['ckpt_var_scope'] is None:
                # Use backbone name as default checkpoint scope.
                ckpt_scope = params['backbone_name'] + '/'
            else:
                ckpt_scope = params['ckpt_var_scope'] + '/'
        else:
            # Load every var in the given checkpoint
            var_scope = ckpt_scope = '/'

        def scaffold_fn():
            """Loads pretrained model through scaffold function."""
            logging.info('restore variables from %s', checkpoint)

            var_map = utils.get_ckpt_var_map(
                ckpt_path=checkpoint,
                ckpt_scope=ckpt_scope,
                var_scope=var_scope,
                skip_mismatch=params['skip_mismatch'])

            tf.train.init_from_checkpoint(checkpoint, var_map)
            return tf.train.Scaffold()
    elif mode == tf.estimator.ModeKeys.EVAL and moving_average_decay:

        def scaffold_fn():
            """Load moving average variables for eval."""
            logging.info('Load EMA vars with ema_decay=%f',
                         moving_average_decay)
            restore_vars_dict = ema.variables_to_restore(ema_vars)
            saver = tf.train.Saver(restore_vars_dict)
            return tf.train.Scaffold(saver=saver)
    else:
        scaffold_fn = None

    if params['strategy'] != 'tpu':
        # Profile every 1K steps.
        if params.get('profile', False):
            profile_hook = tf.estimator.ProfilerHook(
                save_steps=1000,
                output_dir=params['model_dir'],
                show_memory=True)
            training_hooks.append(profile_hook)

            # Report memory allocation if OOM
            class OomReportingHook(tf.estimator.SessionRunHook):
                def before_run(self, run_context):
                    return tf.estimator.SessionRunArgs(
                        fetches=[],
                        options=tf.RunOptions(
                            report_tensor_allocations_upon_oom=True))

            training_hooks.append(OomReportingHook())

        logging_hook = tf.estimator.LoggingTensorHook(
            {
                'step': global_step,
                'det_loss': det_loss,
                'cls_loss': cls_loss,
                'box_loss': box_loss,
            },
            every_n_iter=params.get('iterations_per_loop', 100),
        )
        training_hooks.append(logging_hook)

        if params["nvgpu_logging"]:
            try:
                from third_party import nvgpu  # pylint: disable=g-import-not-at-top
                from functools import reduce  # pylint: disable=g-import-not-at-top

                def get_nested_value(d, path):
                    return reduce(dict.get, path, d)

                def nvgpu_gpu_info(inp):
                    inp = inp.decode("utf-8")
                    inp = inp.split(",")
                    inp = [x.strip() for x in inp]
                    value = get_nested_value(nvgpu.gpu_info(), inp)
                    return str(value)

                def commonsize(inp):
                    const_sizes = {
                        'B': 1,
                        'KB': 1e3,
                        'MB': 1e6,
                        'GB': 1e9,
                        'TB': 1e12,
                        'PB': 1e15,
                        'KiB': 1024,
                        'MiB': 1048576,
                        'GiB': 1073741824
                    }
                    inp = inp.split(" ")
                    # convert all to MiB
                    if inp[1] != 'MiB':
                        inp_ = float(
                            inp[0]) * (const_sizes[inp[1]] / 1048576.0)
                    else:
                        inp_ = float(inp[0])

                    return inp_

                def formatter_log(tensors):
                    """Format the output."""
                    mem_used = tensors["memory used"].decode("utf-8")
                    mem_total = tensors["memory total"].decode("utf-8")
                    mem_util = commonsize(mem_used) / commonsize(mem_total)
                    logstring = "GPU memory used: {} = {:.1%} of total GPU memory: {}".format(
                        mem_used, mem_util, mem_total)
                    return logstring

                mem_used = tf.py_func(nvgpu_gpu_info,
                                      ['gpu, fb_memory_usage, used'],
                                      [tf.string])[0]
                mem_total = tf.py_func(nvgpu_gpu_info,
                                       ['gpu, fb_memory_usage, total'],
                                       [tf.string])[0]

                logging_hook3 = tf.estimator.LoggingTensorHook(
                    tensors={
                        "memory used": mem_used,
                        "memory total": mem_total,
                    },
                    every_n_iter=params.get('iterations_per_loop', 100),
                    formatter=formatter_log,
                )
                training_hooks.append(logging_hook3)
            except Exception:
                logging.error("nvgpu error: nvidia-smi format not recognized")

    if params['strategy'] == 'tpu':
        return tf.estimator.tpu.TPUEstimatorSpec(
            mode=mode,
            loss=total_loss,
            train_op=train_op,
            eval_metrics=eval_metrics,
            host_call=utils.get_tpu_host_call(global_step, params),
            scaffold_fn=scaffold_fn,
            training_hooks=training_hooks)
    else:
        eval_metric_ops = eval_metrics[0](
            **eval_metrics[1]) if eval_metrics else None
        utils.get_tpu_host_call(global_step, params)
        return tf.estimator.EstimatorSpec(mode=mode,
                                          loss=total_loss,
                                          train_op=train_op,
                                          eval_metric_ops=eval_metric_ops,
                                          scaffold=scaffold_fn() if scaffold_fn else None,
                                          training_hooks=training_hooks)
Example #16
    f.root.Group_M_Crit200[:] = cat.Group_M_Crit200
    f.root.GroupMassInRadType[:] = cat.SubhaloMassInRadType[first]
    f.root.GroupMassInHalfRadType[:] = cat.SubhaloMassInHalfRadType[first]
    f.root.GroupHalfmassRadType[:] = cat.SubhaloHalfmassRadType[first]

    f.flush()

    for grp in groups:
        for parttype in [0, 4]:
            subid = cat.GroupFirstSub[grp]
            pos = snapshot.loadSubhalo(fdir, snap, subid, parttype,
                                       ["Coordinates"])  #kpc/h
            mass = snapshot.loadSubhalo(fdir, snap, subid, parttype,
                                        ["Masses"])  #1e10 msun/h

            if isinstance(pos, dict):
                assert pos['count'] == 0

            else:
                pos = utils.image(pos - cat.SubhaloPos[subid], None,
                                  header.boxsize)  #kpc
                #pos = ((pos - cat.SubhaloPos[subid])-box/2.)%box - box/2.
                dist = np.linalg.norm(pos, axis=1) / hubble

                masses[grp, 0, parttype] = mass[dist < 5].sum()
                masses[grp, 1, parttype] = mass[dist < 10].sum()
                masses[grp, 2, parttype] = mass[dist < 30].sum()
                masses[grp, 3, parttype] = mass[dist < 100].sum()

    f.flush()