Example #1
def _get_args_and_errors(self, minuit=None, args=None, errors=None):
    """
    Consistent algorithm for getting arguments and errors:
    1) take them from minuit if minuit is available
    2) otherwise take them from args and errors
    2.1) if args is a dict, parse it
    3) if all else fails, fall back to self.last_arg
    """
    ret_arg = None
    ret_error = None
    if minuit is not None:  # case 1
        ret_arg = minuit.args
        ret_error = minuit.errors
        return ret_arg, ret_error

    # no minuit specified; use args and errors
    if args is not None:
        if isinstance(args, dict):
            ret_arg = parse_arg(self, args)
        else:
            ret_arg = args
    else:  # case 3
        ret_arg = self.last_arg

    if errors is not None:
        ret_error = errors

    return ret_arg, ret_error
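# --- Usage sketch (not from the original source): exercising the three cases above.
# Assumes _get_args_and_errors is in scope; FakeMinuit and FakeFit are hypothetical stand-ins.
class FakeMinuit:
    args = (1.0, 2.0)
    errors = {'mu': 0.1, 'sigma': 0.2}

class FakeFit:
    last_arg = (0.0, 1.0)

fit = FakeFit()
print(_get_args_and_errors(fit, minuit=FakeMinuit()))  # case 1 -> ((1.0, 2.0), {'mu': 0.1, 'sigma': 0.2})
print(_get_args_and_errors(fit, args=(0.5, 1.5)))      # case 2 -> ((0.5, 1.5), None)
print(_get_args_and_errors(fit))                       # case 3 -> ((0.0, 1.0), None)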
Example #2
def draw_x2_residual(self,
                     minuit=None,
                     ax=None,
                     args=None,
                     errors=None,
                     grid=True,
                     norm=False):
    ax = plt.gca() if ax is None else ax

    arg, error = _get_args_and_errors(self, minuit, args, errors)

    x = self.x
    y = self.y
    data_err = self.error
    f = self.f

    arg = parse_arg(f, arg, 1) if isinstance(arg, dict) else arg
    yf = vector_apply(f, x, *arg)

    yplot = y - yf
    eplot = data_err if data_err is not None else np.zeros(len(x))
    if norm:
        if data_err is None:
            warn(
                RuntimeWarning(
                    'No error on data points; cannot normalize to error'))
        else:
            yplot = yplot / data_err
            eplot = data_err / data_err
    ax.errorbar(x, yplot, eplot, fmt='b+')
    ax.grid(grid)
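# --- Illustration (not from the original source): what draw_x2_residual plots.
# With norm=False the y-values are residuals y - f(x); with norm=True they are
# pulls (y - f(x)) / error. Toy numbers only.
import numpy as np

y = np.array([1.1, 1.9, 3.2])    # measured values
yf = np.array([1.0, 2.0, 3.0])   # model predictions f(x, *arg)
err = np.array([0.1, 0.1, 0.2])  # data errors

residual = y - yf                # plotted when norm=False, approximately [0.1, -0.1, 0.2]
pull = residual / err            # plotted when norm=True,  approximately [1.0, -1.0, 1.0]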
Example #3
def _get_args_and_errors(self, minuit=None, args=None, errors=None):
    """
    Consistent algorithm for getting arguments and errors:
    1) take them from minuit if minuit is available
    2) otherwise take them from args and errors
    2.1) if args is a dict, parse it
    3) if all else fails, fall back to self.last_arg
    """
    ret_arg = None
    ret_error = None
    if minuit is not None: # case 1
        ret_arg = minuit.args
        ret_error = minuit.errors
        return ret_arg, ret_error

    # no minuit specified; use args and errors
    if args is not None:
        if isinstance(args, dict):
            ret_arg = parse_arg(self, args)
        else:
            ret_arg = args
    else: # case 3
        ret_arg = self.last_arg

    if errors is not None:
        ret_error = errors

    return ret_arg, ret_error
Example #4
def draw_compare(f, arg, edges, data, errors=None, ax=None, grid=True, normed=False, parts=False):
    """
    TODO: this needs to be rewritten
    """
    #arg is either map or tuple
    ax = plt.gca() if ax is None else ax
    arg = parse_arg(f, arg, 1) if isinstance(arg, dict) else arg
    x = (edges[:-1]+edges[1:])/2.0
    bw = np.diff(edges)
    yf = vector_apply(f, x, *arg)
    total = np.sum(data)
    if normed:
        ax.errorbar(x, data/bw/total, errors/bw/total, fmt='b+', capsize=0)
        ax.plot(x, yf, 'r', lw=2)
    else:
        ax.errorbar(x, data, errors, fmt='b+', capsize=0)
        ax.plot(x, yf*bw, 'r', lw=2)

    #now draw the parts
    if parts:
        if not hasattr(f, 'eval_parts'):
            warn(RuntimeWarning('parts is set to True but function does '
                            'not have eval_parts method'))
        else:
            scale = bw if not normed else 1.
            parts_val = list()
            for tx in x:
                val = f.eval_parts(tx, *arg)
                parts_val.append(val)
            py = zip(*parts_val)
            for y in py:
                tmpy = np.array(y)
                ax.plot(x, tmpy*scale, lw=2, alpha=0.5)
    plt.grid(grid)
    return x, yf, data
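# --- Usage sketch (not from the original source): calling draw_compare on binned data.
# Assumes draw_compare above is in scope; vector_apply below is a simplified stand-in
# for the library helper it relies on, and gaussian is a toy pdf.
import numpy as np
import matplotlib.pyplot as plt

def vector_apply(f, x, *arg):
    return np.array([f(xi, *arg) for xi in x])

def gaussian(x, mu, sigma):
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))

rng = np.random.default_rng(0)
sample = rng.normal(0.0, 1.0, size=1000)
data, edges = np.histogram(sample, bins=40, range=(-4.0, 4.0))

# arg may be a tuple (used directly) or a dict (routed through parse_arg)
x, yf, n = draw_compare(gaussian, (0.0, 1.0), edges, data,
                        errors=np.sqrt(data), normed=True)
plt.show()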
Example #5
def draw_x2_residual(self, minuit=None, ax=None, args=None, errors=None, grid=True,
                     norm=False):
    ax = plt.gca() if ax is None else ax

    arg, error = _get_args_and_errors(self, minuit, args, errors)

    x = self.x
    y = self.y
    data_err = self.error
    f = self.f

    arg = parse_arg(f, arg, 1) if isinstance(arg, dict) else arg
    yf = vector_apply(f, x, *arg)

    yplot = y - yf
    eplot = data_err if data_err is not None else np.zeros(len(x))
    if norm:
        if data_err is None:
            warn(RuntimeWarning('No error on data points; cannot normalize to error'))
        else:
            yplot = yplot / data_err
            eplot = data_err / data_err
    ax.errorbar(x, yplot, eplot, fmt='b+', capsize=0)
    ax.grid(grid)
    return x, yplot, eplot
Example #6
def draw_residual_ulh(
    self,
    minuit=None,
    bins=100,
    ax=None,
    bound=None,
    parmloc=(0.05, 0.95),
    print_par=False,
    grid=True,
    args=None,
    errors=None,
    show_errbars=True,
    errbar_algo="normal",
    norm=False,
):

    ax = plt.gca() if ax is None else ax

    arg, error = _get_args_and_errors(self, minuit, args, errors)

    n, e = np.histogram(self.data, bins=bins, range=bound, weights=self.weights)
    dataint = (n * np.diff(e)).sum()
    scale = dataint if not self.extended else 1.0
    w2 = None
    if errbar_algo == "normal":
        w2 = n
    elif errbar_algo == "sumw2":
        weights = None
        if self.weights is not None:
            weights = self.weights ** 2
        w2, e = np.histogram(self.data, bins=e, weights=weights)
    else:
        raise ValueError("errbar_algo must be 'normal' or 'sumw2'")
    yerr = np.sqrt(w2)

    arg = parse_arg(self.f, arg, 1) if isinstance(arg, dict) else arg
    yf = vector_apply(self.f, mid(e), *arg)
    yf *= scale * np.diff(e) if self.extended else scale
    n = n - yf
    if norm:
        sel = yerr > 0
        n[sel] /= yerr[sel]
        yerr = np.ones(len(yerr))

    if show_errbars:
        pp = ax.errorbar(mid(e), n, yerr, fmt="b.", capsize=0)
    else:  # No errorbars
        pp = ax.bar(e[:-1], n, width=np.diff(e))

    # bound = (e[0], e[-1])
    # draw_arg = [('lw', 2), ('color', 'r')]
    ax.plot([e[0], e[-1]], [0.0, 0.0], "r-")

    ax.grid(grid)

    txt = _param_text(describe(self), arg, error)
    if print_par:
        ax.text(parmloc[0], parmloc[1], txt, ha="left", va="top", transform=ax.transAxes)
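# --- Illustration (not from the original source): the errbar_algo='sumw2' branch above
# takes per-bin errors as sqrt(sum of squared weights). Standalone toy data:
import numpy as np

rng = np.random.default_rng(1)
data = rng.normal(size=500)
weights = rng.uniform(0.5, 1.5, size=500)

n, e = np.histogram(data, bins=20, weights=weights)
w2, _ = np.histogram(data, bins=e, weights=weights ** 2)
yerr = np.sqrt(w2)  # per-bin error for weighted entries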
Example #7
def draw_pdf_with_midpoints(f, arg, x, ax=None, scale=1.0, normed_pdf=False, **kwds):
    ax = plt.gca() if ax is None else ax
    arg = parse_arg(f, arg, 1) if isinstance(arg, dict) else arg
    yf = vector_apply(f, x, *arg)

    if normed_pdf:
        normed_factor = sum(yf) # assume equal binwidth
        yf /= normed_factor
    yf *= scale

    ax.plot(x, yf, **kwds)
    return x, yf
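# --- Usage sketch (not from the original source): plotting a toy pdf at bin midpoints.
# Assumes draw_pdf_with_midpoints above is in scope; vector_apply is a simplified
# stand-in for the library helper, and gaussian is illustrative only.
import numpy as np
import matplotlib.pyplot as plt

def vector_apply(f, x, *arg):
    return np.array([f(xi, *arg) for xi in x])

def gaussian(x, mu, sigma):
    return np.exp(-0.5 * ((x - mu) / sigma) ** 2) / (sigma * np.sqrt(2.0 * np.pi))

midpoints = np.linspace(-3.0, 3.0, 61)
# arg passed as a tuple, so parse_arg is never invoked; extra kwds go to ax.plot
x, yf = draw_pdf_with_midpoints(gaussian, (0.0, 1.0), midpoints, color='r', lw=2)
plt.show()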
Example #8
def draw_residual_blh(self,
                      minuit=None,
                      parmloc=(0.05, 0.95),
                      ax=None,
                      print_par=False,
                      args=None,
                      errors=None,
                      norm=False,
                      grid=True):
    ax = plt.gca() if ax is None else ax

    arg, error = _get_args_and_errors(self, minuit, args, errors)

    m = mid(self.edges)

    if self.use_w2:
        err = np.sqrt(self.w2)
    else:
        err = np.sqrt(self.h)

    n = np.copy(self.h)
    dataint = (n * np.diff(self.edges)).sum()
    scale = dataint if not self.extended else 1.0

    arg = parse_arg(self.f, arg, 1) if isinstance(arg, dict) else arg
    yf = vector_apply(self.f, m, *arg)
    yf *= (scale * np.diff(self.edges) if self.extended else scale)
    n = n - yf
    if norm:
        sel = err > 0
        n[sel] /= err[sel]
        err = np.ones(len(err))

    ax.errorbar(m, n, err, fmt='.')

    ax.plot([self.edges[0], self.edges[-1]], [0., 0.], 'r-')

    ax.grid(grid)

    txt = _param_text(describe(self), arg, error)

    if print_par:
        ax.text(parmloc[0],
                parmloc[1],
                txt,
                ha='left',
                va='top',
                transform=ax.transAxes)
Example #9
def draw_compare(f,
                 arg,
                 edges,
                 data,
                 errors=None,
                 ax=None,
                 grid=True,
                 normed=False,
                 parts=False):
    """
    TODO: this needs to be rewritten
    """
    #arg is either map or tuple
    ax = plt.gca() if ax is None else ax
    arg = parse_arg(f, arg, 1) if isinstance(arg, dict) else arg
    x = (edges[:-1] + edges[1:]) / 2.0
    bw = np.diff(edges)
    yf = vector_apply(f, x, *arg)
    total = np.sum(data)
    if normed:
        ax.errorbar(x, data / bw / total, errors / bw / total, fmt='.b')
        ax.plot(x, yf, 'r', lw=2)
    else:
        ax.errorbar(x, data, errors, fmt='.b')
        ax.plot(x, yf * bw, 'r', lw=2)

    #now draw the parts
    if parts:
        if not hasattr(f, 'eval_parts'):
            warn(
                RuntimeWarning('parts is set to True but function does '
                               'not have eval_parts method'))
        else:
            scale = bw if not normed else 1.
            parts_val = list()
            for tx in x:
                val = f.eval_parts(tx, *arg)
                parts_val.append(val)
            py = zip(*parts_val)
            for y in py:
                tmpy = np.array(y)
                ax.plot(x, tmpy * scale, lw=2, alpha=0.5)
    plt.grid(grid)
    return x, yf, data
Example #10
def draw_pdf_with_midpoints(f,
                            arg,
                            x,
                            ax=None,
                            scale=1.0,
                            normed_pdf=False,
                            no_plot=False,
                            **kwds):
    ax = plt.gca() if ax is None and not no_plot else ax
    arg = parse_arg(f, arg, 1) if isinstance(arg, dict) else arg
    yf = vector_apply(f, x, *arg)

    if normed_pdf:
        normed_factor = sum(yf)  # assume equal binwidth
        yf /= normed_factor
    yf *= scale

    if not no_plot:
        ax.plot(x, yf, **kwds)
    return x, yf
Example #11
def draw_residual_blh(self, minuit=None, parmloc=(0.05, 0.95),
                      ax=None, print_par=False, args=None, errors=None,
                      norm=False, grid=True):
    ax = plt.gca() if ax is None else ax

    arg, error = _get_args_and_errors(self, minuit, args, errors)

    m = mid(self.edges)

    if self.use_w2:
        err = np.sqrt(self.w2)
    else:
        err = np.sqrt(self.h)

    n = np.copy(self.h)
    dataint = (n * np.diff(self.edges)).sum()
    scale = dataint if not self.extended else 1.0

    arg = parse_arg(self.f, arg, 1) if isinstance(arg, dict) else arg
    yf = vector_apply(self.f, m, *arg)
    yf *= (scale * np.diff(self.edges) if self.extended else scale)
    n = n - yf
    if norm:
        sel = err > 0
        n[sel] /= err[sel]
        err = np.ones(len(err))

    ax.errorbar(m, n, err, fmt='+', capsize=0)

    ax.plot([self.edges[0], self.edges[-1]], [0., 0.], 'r-')

    ax.grid(grid)

    txt = _param_text(describe(self), arg, error)

    if print_par:
        ax.text(parmloc[0], parmloc[1], txt, ha='left', va='top',
                transform=ax.transAxes)
    return m, n, err
Example #12
  global DECK_BUFFER_NUM, SCAN_COUNT
  while True:
    try:
      DECK_BUFFER_NUM = 0
      SCAN_COUNT = load_key('SCAN_COUNT', 1)
      pagenum = load_key('CURRENT_PAGE', 1)
      rownum = load_key('CURRENT_ROW', 0)
      while parse_page(pagenum, rownum):
        pagenum += 1
        rownum = 0
      deck_remove_unscanned(SCAN_COUNT)
      break
    except (IndexError, KeyError) as e:
      database_rollback()
      print e
      print 'Retry after %d seconds.' % REST_INTERVAL
      time.sleep(REST_INTERVAL)
  database_commit()
  SCAN_COUNT += 1
  save_key('SCAN_COUNT', SCAN_COUNT)
  save_key('CURRENT_PAGE', 1)
  save_key('CURRENT_ROW', 0)
  print 'Scan pass %d finished!' % (SCAN_COUNT - 1)

if __name__ == '__main__':
  (database_name,) = parse_arg((str,), 1)
  database_connect(database_name)
  deck_create()
  parse()
  database_close()
Example #13
def draw_residual_ulh(self,
                      minuit=None,
                      bins=100,
                      ax=None,
                      bound=None,
                      parmloc=(0.05, 0.95),
                      print_par=False,
                      grid=True,
                      args=None,
                      errors=None,
                      show_errbars=True,
                      errbar_algo='normal',
                      norm=False):

    ax = plt.gca() if ax is None else ax

    arg, error = _get_args_and_errors(self, minuit, args, errors)

    n, e = np.histogram(self.data,
                        bins=bins,
                        range=bound,
                        weights=self.weights)
    dataint = (n * np.diff(e)).sum()
    scale = dataint if not self.extended else 1.0
    w2 = None
    if errbar_algo == 'normal':
        w2 = n
    elif errbar_algo == 'sumw2':
        weights = None
        if self.weights is not None:
            weights = self.weights**2
        w2, e = np.histogram(self.data, bins=e, weights=weights)
    else:
        raise ValueError('errbar_algo must be \'normal\' or \'sumw2\'')
    yerr = np.sqrt(w2)

    arg = parse_arg(self.f, arg, 1) if isinstance(arg, dict) else arg
    yf = vector_apply(self.f, mid(e), *arg)
    yf *= (scale * np.diff(e) if self.extended else scale)
    n = n - yf
    if norm:
        sel = yerr > 0
        n[sel] /= yerr[sel]
        yerr = np.ones(len(yerr))

    if show_errbars:
        pp = ax.errorbar(mid(e), n, yerr, fmt='b.', capsize=0)
    else:  # No errorbars
        pp = ax.bar(e[:-1], n, width=np.diff(e))

    #bound = (e[0], e[-1])
    #draw_arg = [('lw', 2), ('color', 'r')]
    ax.plot([e[0], e[-1]], [0., 0.], 'r-')

    ax.grid(grid)

    txt = _param_text(describe(self), arg, error)
    if print_par:
        ax.text(parmloc[0],
                parmloc[1],
                txt,
                ha='left',
                va='top',
                transform=ax.transAxes)
Example #14
#!/usr/bin/env python3
import os
import sys
import util
import socket
import logging

if __name__ == "__main__":
    logging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',
                        level=logging.DEBUG)
    args = util.parse_arg()
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((args.address, args.port))
        logging.info(f'connected to {args.address}:{args.port}')

        if args.client2server:
            util.file2conn(args.file, s)
        else:
            util.conn2file(args.file, s)
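# --- Hypothetical sketch (not from the original source) of what util.parse_arg might
# look like; the option names are assumptions inferred from the attributes read above
# (address, port, client2server, file).
import argparse

def parse_arg():
    p = argparse.ArgumentParser()
    p.add_argument('--address', required=True)
    p.add_argument('--port', type=int, required=True)
    p.add_argument('--file', required=True)
    p.add_argument('--client2server', action='store_true',
                   help='send the local file to the server instead of receiving')
    return p.parse_args()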
Example #15
def show_deck(deck, cost_total, card_hand, card_forge, card_soul):
  print '%s (by %s)' % (deck.name, deck.author)
  print 'Rating: %d, Type: %s, Updated: %s' % (deck.rating, deck.type, time.strftime('%Y-%m-%d %H:%M:%S', deck.time_update))
  if card_hand:
    print 'Cards already in your collection:'
    print '\n'.join(['  %s x %d' % (card.colored_name(), count) for (card, count) in card_hand])
  if card_forge:
    print 'Cards can be forged:'
    print '\n'.join(['  %s x %d' % (card.colored_name(), count) for (card, count) in card_forge])
    print '  Total Arcane Dust: %d' % cost_total
  if card_soul:
    print 'Cards can not be forged (Soulbound):'
    print '\n'.join(['  %s x %d' % (card.colored_name(), count) for (card, count) in card_soul])
  print

if __name__ == '__main__':
  (database_name, collection_name, hero_class, dust_amount) = parse_arg((str, str, str, int), 3)
  if dust_amount is None:
    dust_amount = 0
  load_card_collection(collection_name)
  database_connect(database_name)
  for row in deck_select_by_class(hero_class):
    deck = Deck.from_database(row)
    if deck.is_valid():
      (card_hand, card_forge, card_soul, cost_total) = split_card(deck)
      if not card_soul and cost_total <= dust_amount or dust_amount == -1:
        show_deck(deck, cost_total, card_hand, card_forge, card_soul)
        if raw_input('Press Enter to continue. Input (X) and enter to exit.\n').upper() == 'X':
          break
  database_close()
Example #16
      card_count[rarity][count + 1] += 1
      required_dust -= FORGE_COST[rarity]
    else:
      curr_dust += DUST_VALUE[rarity]
  return num_draw

def estimate_pack_num (card_count, curr_dust, required_dust, simulate_num):
  ave_draw = 0.0
  simulate_round = 0
  while simulate_round < simulate_num:
    card_count_copy = copy.deepcopy(card_count)
    ave_draw += simulate_draw(card_count_copy, curr_dust, required_dust)
    simulate_round += 1
  ave_draw /= simulate_num
  return int(math.ceil(round(ave_draw) / CARD_PER_PACK))

if __name__ == '__main__':
  (collection_name, card_set, dust_amount, simulate_num) = parse_arg((str, str, int, int), 2)
  if card_set not in Card.SET_PURCHASE:
    print '%s is not available to purchase in Shop.' % card_set
    sys.exit(1)
  if dust_amount is None:
    dust_amount = 0
  if simulate_num is None:
    simulate_num = 100
  init_global()
  card_count = count_card_collection(collection_name, card_set)
  required_dust = calc_required_dust(card_count)
  pack_num = estimate_pack_num(card_count, dust_amount, required_dust, simulate_num)
  print 'The estimated number of packs to purchase for set %s is %d.' % (card_set, pack_num)
Example #17
                                              pred_dist[last_indi:indi],
                                              input_doc, input_bigram,
                                              nucleus_filter=nucleus_filter, top_p=top_p)
        outputs += output
        outputs_pos += output_pos
        last_indi = indi
    return outputs, outputs_pos


import json

if __name__ == '__main__':
    print("Look at the entropy")
    # from data_collection import CUR_DIR, PROB_META_DIR, spec_name, MODEL_NAME

    args = parse_arg()

    model_files = os.listdir(args.cur_dir)

    random.shuffle(model_files)
    model_files = model_files[:args.max_sample_num]
    print(f"total len of files: {len(model_files)}")
    entropies = []
    max_probs = []
    print(args.spec_name)
    if 'pegasus' in args.model_name:
        from transformers import PegasusTokenizer

        bpe_tokenizer = PegasusTokenizer.from_pretrained(args.model_name)
        EOS_TOK_IDs = [106, bpe_tokenizer.eos_token_id, 2]  # <n>
    elif 'gpt' in args.model_name:
Example #18
def main():
    args = util.parse_arg()

    time_string = datetime.now().strftime('%Y-%m-%d-%H-%M-%S')
    root_folder = os.path.join(
        args.save_folder,
        '%s_%s_%d' % (args.setting, time_string, os.getpid()))
    if not os.path.exists(root_folder):
        os.makedirs(root_folder)

    folder_ckpt = os.path.join(root_folder, 'ckpts')
    if not os.path.exists(folder_ckpt):
        os.makedirs(folder_ckpt)

    folder_summary = os.path.join(root_folder, 'summary')
    if not os.path.exists(folder_summary):
        os.makedirs(folder_summary)

    print('PID:', os.getpid())

    sys.path.append(os.path.join(os.path.dirname(__file__), 'settings'))
    setting = importlib.import_module(args.setting)

    #################################################################
    # start to define tensorflow operations
    #################################################################

    num_epochs = args.epochs or setting.num_epochs
    batch_size = args.batch_size or setting.batch_size
    sample_num = setting.sample_num
    step_val = setting.step_val
    rotation_range = setting.rotation_range
    rotation_range_val = setting.rotation_range_val
    scaling_range = setting.scaling_range
    scaling_range_val = setting.scaling_range_val
    jitter = setting.jitter
    jitter_val = setting.jitter_val
    pool_setting_train = None if not hasattr(
        setting, 'pool_setting_train') else setting.pool_setting_train

    # Prepare inputs
    print('{}-Preparing datasets...'.format(datetime.now()))
    data_train, label_train, data_val, label_val = data_util.load_whole_data(
        args.path_train, args.path_val)

    num_train = data_train.shape[0]
    point_num = data_train.shape[1]
    num_val = data_val.shape[0]
    print('{}-{:d}/{:d} training/validation samples.'.format(
        datetime.now(), num_train, num_val))

    ######################################################################
    # Placeholders
    indices = tf.placeholder(tf.int32, shape=(None, None, 2), name="indices")
    xforms = tf.placeholder(tf.float32, shape=(None, 3, 3), name="xforms")
    rotations = tf.placeholder(tf.float32,
                               shape=(None, 3, 3),
                               name="rotations")
    jitter_range = tf.placeholder(tf.float32, shape=(1), name="jitter_range")
    is_training = tf.placeholder(tf.bool, name='is_training')

    data_train_placeholder = tf.placeholder(data_train.dtype,
                                            data_train.shape,
                                            name='data_train')
    label_train_placeholder = tf.placeholder(tf.int64,
                                             label_train.shape,
                                             name='label_train')
    data_val_placeholder = tf.placeholder(data_val.dtype,
                                          data_val.shape,
                                          name='data_val')
    label_val_placeholder = tf.placeholder(tf.int64,
                                           label_val.shape,
                                           name='label_val')
    handle = tf.placeholder(tf.string, shape=[], name='handle')

    ######################################################################
    ######################################################################
    # Variables
    global_step = tf.Variable(0, trainable=False, name='global_step')

    ######################################################################

    dataset_train = tf.data.Dataset.from_tensor_slices((data_train_placeholder, label_train_placeholder)) \
        .apply(tf.contrib.data.batch_and_drop_remainder(batch_size)) \
        .shuffle(buffer_size=batch_size * 4) \
        .batch(batch_size) \
        .repeat(num_epochs)

    batch_num_per_epoch = math.floor(num_train / batch_size)

    iterator_train = dataset_train.make_initializable_iterator()
    batch_num = batch_num_per_epoch * num_epochs
    print('{}-{:d} training batches.'.format(datetime.now(), batch_num))

    dataset_val = tf.data.Dataset.from_tensor_slices((data_val_placeholder, label_val_placeholder)) \
        .apply(tf.contrib.data.batch_and_drop_remainder(batch_size)) \
        .batch(batch_size)
    batch_num_val = math.floor(num_val / batch_size)
    iterator_val = dataset_val.make_initializable_iterator()
    print('{}-{:d} testing batches per test.'.format(datetime.now(),
                                                     batch_num_val))

    iterator = tf.data.Iterator.from_string_handle(handle,
                                                   dataset_train.output_types)
    (pts_fts, labels) = iterator.get_next()

    pts_fts_sampled = tf.gather_nd(pts_fts,
                                   indices=indices,
                                   name='pts_fts_sampled')
    features_augmented = None
    if setting.data_dim > 3:
        points_sampled, features_sampled = tf.split(
            pts_fts_sampled, [3, setting.data_dim - 3],
            axis=-1,
            name='split_points_features')
        if setting.use_extra_features:
            if setting.with_normal_feature:
                if setting.data_dim < 6:
                    print('Only 3D normals are supported!')
                    exit()
                elif setting.data_dim == 6:
                    features_augmented = util.augment(features_sampled,
                                                      rotations)
                else:
                    normals, rest = tf.split(features_sampled,
                                             [3, setting.data_dim - 6])
                    normals_augmented = util.augment(normals, rotations)
                    features_augmented = tf.concat([normals_augmented, rest],
                                                   axis=-1)
            else:
                features_augmented = features_sampled
    else:
        points_sampled = pts_fts_sampled
    points_augmented = util.augment(points_sampled, xforms, jitter_range)

    net = model.Net(points=points_augmented,
                    features=features_augmented,
                    is_training=is_training,
                    setting=setting)
    logits = net.logits
    probs = tf.nn.softmax(logits, name='probs')
    predictions = tf.argmax(probs, axis=-1, name='predictions')

    labels_2d = tf.expand_dims(labels, axis=-1, name='labels_2d')
    labels_tile = tf.tile(labels_2d, (1, tf.shape(logits)[1]),
                          name='labels_tile')
    loss_op = tf.losses.sparse_softmax_cross_entropy(labels=labels_tile,
                                                     logits=logits)

    with tf.name_scope('metrics'):
        loss_mean_op, loss_mean_update_op = tf.metrics.mean(loss_op)
        t_1_acc_op, t_1_acc_update_op = tf.metrics.accuracy(
            labels_tile, predictions)
        t_1_per_class_acc_op, t_1_per_class_acc_update_op = tf.metrics.mean_per_class_accuracy(
            labels_tile, predictions, setting.num_class)
    reset_metrics_op = tf.variables_initializer([
        var for var in tf.local_variables()
        if var.name.split('/')[0] == 'metrics'
    ])

    #################################################################
    # summary
    _ = tf.summary.scalar('loss/train',
                          tensor=loss_mean_op,
                          collections=['train'])
    _ = tf.summary.scalar('t_1_acc/train',
                          tensor=t_1_acc_op,
                          collections=['train'])
    _ = tf.summary.scalar('t_1_per_class_acc/train',
                          tensor=t_1_per_class_acc_op,
                          collections=['train'])

    _ = tf.summary.scalar('loss/val', tensor=loss_mean_op, collections=['val'])
    _ = tf.summary.scalar('t_1_acc/val',
                          tensor=t_1_acc_op,
                          collections=['val'])
    _ = tf.summary.scalar('t_1_per_class_acc/val',
                          tensor=t_1_per_class_acc_op,
                          collections=['val'])
    #################################################################

    lr_exp_op = tf.train.exponential_decay(setting.learning_rate_base,
                                           global_step,
                                           setting.decay_steps,
                                           setting.decay_rate,
                                           staircase=True)
    lr_clip_op = tf.maximum(lr_exp_op, setting.learning_rate_min)
    _ = tf.summary.scalar('learning_rate',
                          tensor=lr_clip_op,
                          collections=['train'])

    reg_loss = setting.weight_decay * tf.losses.get_regularization_loss()

    if setting.optimizer == 'adam':
        optimizer = tf.train.AdamOptimizer(learning_rate=lr_clip_op,
                                           epsilon=setting.epsilon)
    elif setting.optimizer == 'momentum':
        optimizer = tf.train.MomentumOptimizer(learning_rate=lr_clip_op,
                                               momentum=setting.momentum,
                                               use_nesterov=True)

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_op = optimizer.minimize(loss_op + reg_loss,
                                      global_step=global_step)

    init_op = tf.group(tf.global_variables_initializer(),
                       tf.local_variables_initializer())

    saver = tf.train.Saver(max_to_keep=None)

    parameter_num = np.sum(
        [np.prod(v.shape.as_list()) for v in tf.trainable_variables()])
    print('{}-Parameter number: {:d}.'.format(datetime.now(), parameter_num))

    with tf.Session() as sess:
        summaries_op = tf.summary.merge_all('train')
        summaries_val_op = tf.summary.merge_all('val')
        summary_writer = tf.summary.FileWriter(folder_summary, sess.graph)

        sess.run(init_op)

        # Load the model
        if args.load_ckpt is not None:
            saver.restore(sess, args.load_ckpt)
            print('{}-Checkpoint loaded from {}!'.format(
                datetime.now(), args.load_ckpt))

        handle_train = sess.run(iterator_train.string_handle())
        handle_val = sess.run(iterator_val.string_handle())

        sess.run(iterator_train.initializer,
                 feed_dict={
                     data_train_placeholder: data_train,
                     label_train_placeholder: label_train,
                 })

        for batch_idx_train in range(batch_num):
            ######################################################################
            # Validation
            if (batch_idx_train % step_val == 0 and (batch_idx_train != 0 or args.load_ckpt is not None)) \
                    or batch_idx_train == batch_num - 1:
                sess.run(iterator_val.initializer,
                         feed_dict={
                             data_val_placeholder: data_val,
                             label_val_placeholder: label_val,
                         })
                filename_ckpt = os.path.join(folder_ckpt, 'iter')
                saver.save(sess, filename_ckpt, global_step=global_step)
                print('{}-Checkpoint saved to {}!'.format(
                    datetime.now(), filename_ckpt))

                sess.run(reset_metrics_op)
                for batch_idx_val in range(batch_num_val):
                    xforms_np, rotations_np = util.get_xforms(
                        batch_size,
                        rotation_range=rotation_range_val,
                        scaling_range=scaling_range_val,
                        order=setting.rotation_order)
                    sess.run(
                        [
                            loss_mean_update_op, t_1_acc_update_op,
                            t_1_per_class_acc_update_op
                        ],
                        feed_dict={
                            handle:
                            handle_val,
                            indices:
                            util.get_indices(
                                batch_size,
                                sample_num,
                                point_num,
                            ),
                            xforms:
                            xforms_np,
                            rotations:
                            rotations_np,
                            jitter_range:
                            np.array([jitter_val]),
                            is_training:
                            False,
                        })
                loss_val, t_1_acc_val, t_1_per_class_acc_val, summaries_val = sess.run(
                    [
                        loss_mean_op, t_1_acc_op, t_1_per_class_acc_op,
                        summaries_val_op
                    ])
                summary_writer.add_summary(summaries_val, batch_idx_train)
                print(
                    '{}-[Val  ]-Average:      Loss: {:.4f}  T-1 Acc: {:.4f}  T-1 mAcc: {:.4f}'
                    .format(datetime.now(), loss_val, t_1_acc_val,
                            t_1_per_class_acc_val))
                sys.stdout.flush()
            ######################################################################

            ######################################################################
            # Training
            offset = int(
                random.gauss(0, sample_num * setting.sample_num_variance))
            offset = max(offset, -sample_num * setting.sample_num_clip)
            offset = min(offset, sample_num * setting.sample_num_clip)
            sample_num_train = sample_num + offset
            xforms_np, rotations_np = util.get_xforms(
                batch_size,
                rotation_range=rotation_range,
                scaling_range=scaling_range,
                order=setting.rotation_order)
            sess.run(reset_metrics_op)
            sess.run(
                [
                    train_op, loss_mean_update_op, t_1_acc_update_op,
                    t_1_per_class_acc_update_op
                ],
                feed_dict={
                    handle:
                    handle_train,
                    indices:
                    util.get_indices(batch_size, sample_num_train, point_num,
                                     pool_setting_train),
                    xforms:
                    xforms_np,
                    rotations:
                    rotations_np,
                    jitter_range:
                    np.array([jitter]),
                    is_training:
                    True,
                })
            if batch_idx_train % 10 == 0:
                loss, t_1_acc, t_1_per_class_acc, summaries = sess.run([
                    loss_mean_op, t_1_acc_op, t_1_per_class_acc_op,
                    summaries_op
                ])
                summary_writer.add_summary(summaries, batch_idx_train)
                print(
                    '{}-[Train]-Iter: {:06d}  Loss: {:.4f}  T-1 Acc: {:.4f}  T-1 mAcc: {:.4f}'
                    .format(datetime.now(), batch_idx_train, loss, t_1_acc,
                            t_1_per_class_acc))
                sys.stdout.flush()
            ######################################################################
        print('{}-Done!'.format(datetime.now()))
Example #19
# Author: Nek-12 on Github:
# https://github.com/Nek-12
# Created on Mar 18, 2021


use_auth = None
use_cp1251 = None
args = sys.argv
enc = None
vk = None
data = []


try:
    use_cp1251, use_auth = parse_arg(args[1]), parse_arg(args[2])
except ValueError as exc:
    print(f"Неверный аргумент: {exc.args[0]}. Возможны только аргументы {POSSIBLE_ARGS}")
    exit(-1)
except IndexError:
    pass

try:
    enc = CnM.from_path(INPUT_FNAME).best().first().encoding
    if enc != 'utf-8':
        print("\n\n", ENC_WARN, "\n\n")
        if use_cp1251 is None:
            use_cp1251 = yes_no("Use cp1251 instead of the current encoding?")
        if use_cp1251:
            enc = 'cp1251'
    # parse the file with group IDs