Example #1
def remove_user_from_group(conn, username, group):
    conn.bind()
    user = 'uid=%s,%s' % (username, get_configs('ldap')['account_base'])
    group = 'cn=%s,%s' % (group, get_configs('ldap')['group_base'])
    conn.modify(group, {'uniqueMember': (MODIFY_DELETE, user)})
    print(conn.result)
    conn.unbind()
Example #2
def add_group(conn, groupname, users):
    group = 'cn=%s,%s' % (groupname, get_configs('ldap')['group_base'])
    # Build the full member DNs as a list (a lazy map object is not usable as attribute values).
    users_dn = ['uid=%s,%s' % (x, get_configs('ldap')['account_base'])
                for x in users]
    conn.bind()
    conn.add(group, ['groupOfUniqueNames', 'top'], {
        'uniqueMember': users_dn,
        'cn': groupname
    })
    print(conn.result)
    conn.unbind()
Example #3
def get_admin_conn():
    conf = get_configs('ldap')
    try:
        return Connection(Server(conf['ldap_server']), conf['admin_dn'],
                          conf['admin_password'])
    except Exception:
        return False
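The helpers in these examples expect an unbound ldap3 Connection and call bind()/unbind() themselves, so callers only need to obtain the connection. A minimal usage sketch (the group and user names are hypothetical, not from the original source):

conn = get_admin_conn()
if conn:
    # add_group() binds and unbinds the connection itself (see Example #2).
    add_group(conn, 'developers', ['alice', 'bob'])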
Example #4
def main(_):
  """
  The model specified by the command line arg --model_dir is applied to every
  data point in --test_datafile and the model output is sent to --output. The
  unix command 'paste' can be used to stitch the input file and output together.
  e.g.,
  $ classifiy_data.py --config=train.conf --test_datafile=test.dat > output.dat
  $ paste -d ' ' test.dat output.dat > input_and_output.dat
  """
  configs.DEFINE_string('test_datafile',None,'file with test data')
  configs.DEFINE_string('time_field','date','fields used for dates/time')
  configs.DEFINE_string('print_start','190001','only print data on or after')
  configs.DEFINE_string('print_end','999912','only print data on or before')
  configs.DEFINE_integer('num_batches',None,'num_batches override')

  config = configs.get_configs()

  if config.test_datafile is None:
     config.test_datafile = config.datafile

  batch_size = 1
  data_path = model_utils.get_data_path(config.data_dir,config.test_datafile)

  # print("Loading data %s"%data_path)

  dataset = BatchGenerator(data_path, config,
                             batch_size=batch_size,
                             num_unrollings=config.num_unrollings)

  num_data_points = dataset.num_batches
  if config.num_batches is not None:
     num_data_points = config.num_batches

  #print("num_batches = ", num_data_points)

  tf_config = tf.ConfigProto( allow_soft_placement=True,
                                log_device_placement=False )

  with tf.Graph().as_default(), tf.Session(config=tf_config) as session:

    #print("Loading model.")

    model = model_utils.get_trained_model(session, config, verbose=False)

    for i in range(num_data_points):

      batch = dataset.next_batch()
      preds = model.step(session, batch)
      seq_len = get_seq_length(batch)
      key, date = get_key_and_date(batch, seq_len-1)

      if (date < config.print_start or date > config.print_end):
        continue

      score  = get_score(config, preds, seq_len-1)
      target = get_target(config, batch, seq_len-1)

      print("%s %s %.6f %.6f %d" % (key, date, score, target, seq_len))
Example #5
def get_administrated_groups(conn, username):
    # Return the groups this user administers (if they are a leader), or False otherwise.
    conn.bind()
    is_leader = False
    leaders_group_name = get_configs('ldap')['leaders_group_name']
    user = 'uid=%s,%s' % (username, get_configs('ldap')['account_base'])
    conn.search(search_base=leaders_group_name,
                search_filter='(objectClass=groupOfUniqueNames)',
                search_scope=SUBTREE,
                attributes=['uniqueMember'])
    for entry in conn.response:
        if user in entry['attributes']['uniqueMember']:
            is_leader = True
    if is_leader:
        return get_groups_with_user(conn=conn,
                                    user_dn=user,
                                    leaders_group_dn=leaders_group_name)
    conn.unbind()
    return is_leader
Example #6
def auth_login(username, password):
    conf = get_configs('ldap')
    user = 'uid=%s,%s' % (username, conf['account_base'])
    try:
        return Connection(Server(conf['ldap_server']),
                          user,
                          password,
                          auto_bind=True)
    except Exception:
        return False
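Because auto_bind=True binds during construction, invalid credentials make Connection() raise and auth_login() returns False, so the result can be used as a simple truthy login check. A hedged usage sketch (the credentials are made up):

conn = auth_login('alice', 'wrong-password')
if not conn:
    print('login failed')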
Example #7
def delete_user(conn, username):
    conn.bind()
    user = 'uid=%s,%s' % (username, get_configs('ldap')['account_base'])
    conn.delete(user)
    groups = get_groups_with_user(conn=conn, user_dn=user)
    # REMOVE user from groups.
    if groups:
        conn.bind()
        for group in groups:
            conn.modify(group, {'uniqueMember': (MODIFY_DELETE, user)})
    conn.unbind()
Example #8
def change_group_name(conn, old, new):
    if old and new:
        if get_group(conn, new):
            return False
        conn.bind()
        old_dn = 'cn=%s,%s' % (old, get_configs('ldap')['group_base'])
        conn.modify_dn(old_dn, "cn=" + new)
        print(conn.result)
        conn.unbind()
        return True
Example #9
def get_groups_with_user(conn,
                         user_dn=None,
                         leaders_group_dn=None,
                         username=None):
    # search for groups containing this user
    conn.bind()
    groups = []
    conn.search(search_base=get_configs('ldap')['group_base'],
                search_filter='(objectClass=groupOfUniqueNames)',
                search_scope=SUBTREE,
                attributes=['uniqueMember'])
    if not user_dn:
        if not username:
            return None
        user_dn = 'uid=%s,%s' % (username, get_configs('ldap')['account_base'])
    for group in conn.response:
        if user_dn in group['attributes']['uniqueMember'] and \
        group['dn'] != leaders_group_dn:
            groups.append(group['dn'])
    conn.unbind()
    return groups
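get_groups_with_user() accepts either a ready-made DN or a bare username. Both call styles below are purely illustrative; the DN components are hypothetical:

# Pass a full DN directly ...
groups = get_groups_with_user(conn, user_dn='uid=alice,ou=people,dc=example,dc=org')
# ... or just a username, and the DN is built from the configured account_base.
groups = get_groups_with_user(conn, username='alice')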
Example #10
def change_passwd(username, password, conn=None):
    if not conn:
        conn = get_admin_conn()
    conn.bind()
    user = 'uid=%s,%s' % (username, get_configs('ldap')['account_base'])
    conn.modify(user, {
        'userPassword':
        ('MODIFY_REPLACE', [hashed(HASHED_SALTED_SHA, password)])
    })
    print(conn.result)
    conn.unbind()
    return True
Example #11
def get_group_users(conn, group_name):
    group = 'cn=%s,%s' % (group_name, get_configs('ldap')['group_base'])
    conn.bind()
    conn.search(search_base=group,
                search_filter='(objectClass=groupOfUniqueNames)',
                search_scope=SUBTREE,
                attributes=['uniqueMember'])
    response = conn.response
    if not response:
        return []
    users = conn.response[0]['attributes']['uniqueMember']
    conn.unbind()
    return users
Example #12
def filter_admin_users(conn, users):
    conn.bind()
    conn.search(search_base=get_configs('ldap')['leaders_group_name'],
                search_filter='(objectClass=*)',
                search_scope=SUBTREE,
                attributes=['uniqueMember'])
    admins = conn.response[0]['attributes']['uniqueMember']
    admin_users = []
    if not users:
        return None
    for user in users:
        if user in admins:
            admin_users.append(user)
    return sorted(admin_users)
Example #13
    def __init__(self, adj_list: dict = None) -> None:
        '''
        adj_list: dict with keys as Node types and values as set of Nodes types
        '''
        self.adj_list = adj_list
        self.configs = get_configs()

        self.reprs = {
            'bipartite': dict(),
            'bfs': dict(),
            'dfs': dict(),
            'bfs_tree': dict(),
            'dfs_tree': dict()
        }
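A minimal construction sketch, assuming this __init__ belongs to the Graph class referenced in Example #20 and reusing that example's adjacency-dict shape (the node keys are arbitrary):

g = Graph(adj_list={'a': {'b', 'c'}, 'b': {'a'}, 'c': {'a'}})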
Example #14
def get_group(conn, group=None):
    conn.bind()
    conf = get_configs('ldap')
    if group:
        filter = '(cn=%s)' % group
    else:
        filter = '(objectClass=*)'
    entry = conn.extend.standard.paged_search(conf['group_base'],
                                              filter,
                                              search_scope=SUBTREE)
    if entry:
        group_result = [item['dn'] for item in entry]
    else:
        group_result = []
    conn.unbind()
    return group_result
Example #15
def list_users(conn=None, user=None):
    if not conn:
        conn = get_admin_conn()
    conn.bind()
    if user:
        filter = '(uid=%s)' % user
    else:
        filter = '(objectClass=inetOrgPerson)'
    entry_generator = conn.extend.standard.paged_search(
        search_base=get_configs('ldap')['account_base'],
        search_filter=filter,
        search_scope=SUBTREE,
        paged_size=5,
        generator=True)
    # for (entry, counter) in zip(entry_generator, range(5)):
    #     print(entry['dn'])
    # return entry_generator  # returning the generator is unsafe: unbinding the connection causes a generator error if any further operation is attempted.
    entry = [item['dn'] for item in entry_generator]
    conn.unbind()
    return entry
Example #16
def add_user(conn, username, groupname, mail=None):
    # TODO: Either modify groups from objectClass: groupOfUniqueNames to
    # objectClass: posixGroup with a gidNumber: 10000
    # or move all groups to groupOfUniqueNames and manage users
    # with uniqueMember: uid=...
    # gidNumber: 1 is a fake one.

    # param username: str name of a user.
    # param groupname: str name of a group.

    conn.bind()
    user = 'uid=%s,%s' % (username, get_configs('ldap')['account_base'])
    group = get_group(conn, groupname)
    conn.bind()
    conn.add(
        user, ['inetOrgPerson', 'posixAccount', 'shadowAccount', 'top'], {
            'sn': username,
            'displayName': username,
            'cn': username,
            'uidNumber': 1,
            'gidNumber': 1,
            'givenName': username,
            'homeDirectory': '/home/' + username,
            'uid': username
        })
    print(conn.result)
    # Add user to group:
    if group:
        # TODO: add support for user modify gidNumber for
        # none groupOfUniqueNames groups:
        # conn.modify(user, {'gidNumber': (MODIFY_ADD, [item['']])})
        # conn.extend.microsoft.add_members_to_groups([user],[group]) ##invalid
        conn.modify(group[0], {'uniqueMember': ('MODIFY_ADD', [user])})  # get_group() returns a list of DNs
        print(conn.result)
    if mail:
        conn.modify(user, {'mail': ('MODIFY_REPLACE', mail)})
    conn.unbind()
Example #17
def main():
    logger.info(SEPARATOR)
    configs = get_configs()

    # Run data preprocess
    if configs.data_preprocess_active:
        logger.info(configs)
        preprocess = DataPreprocess(configs)
        preprocess.data_preprocess()
        logger.info("Data preprocess finished!")
    # Run train model
    if configs.da_rnn_model_active:
        logger.info(configs)
        da_rnn_model = DaRnnModel(configs)
        da_rnn_model.run()
        logger.info("Da_rnnModel finished!")
    if configs.xgboost_gridsearch_model_active:
        xgboost_model = XgboostGridSearchModel(configs)
        xgboost_model.run()
        logger.info("XGboost finished!")
    if configs.tcn_big_file_model_active:
        tcn_model = TcnModel(configs)
        tcn_model.run()
        logger.info("TcnModel finished!")
Example #18
    global sess
    print(config)
    model = LSTMModel(config)
    sess = tf.InteractiveSession()
    sess.run(tf.global_variables_initializer())
    return train(train_set, valid_set, test_set, model)


EPOCHS_NUM = 20
BATCH_SIZE = 20
NUM_STEPS = 20

fig, ax = plt.subplots()
ax.set_yticks(np.arange(0, 10, 0.5))
ax.grid(which='both')
sess = tf.InteractiveSession()
configs = get_configs(ALPHABET_SIZE)
series = []
xs = list(range(1, EPOCHS_NUM + 1))
for label, config in configs:
    tf.reset_default_graph()
    perplexities = start(config)
    ax.plot(xs, perplexities, label=label)
    series.append(perplexities)

legend = ax.legend(loc='upper center', shadow=True)

plt.savefig("results")  # save before show(): the figure may be empty once the window is closed

plt.show()
Example #19
else:
  device = torch.device("cpu")

if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('--input_folder', type=str, default="./data/data_vn/arg_data")
    parser.add_argument('--min_word_freq', type=int, default=2)
    parser.add_argument('--batch_size', type=int, default=64)
    parser.add_argument('--wv_file', type=str, default='./pretrained_embedding/word2vec/baomoi.vn.model.bin')

    
    corpus = Corpus(
        parser.parse_args()
    )

    configs_event = get_configs(corpus, device)
    configs_arg = get_configs_arguments(corpus, device)


    model_event = Model_ED(**configs_event['cnn_seq+w2v'])

    model_arg = Model_EA(**configs_arg['cnn+w2v'])

    model_event.load_state('./pretrained_model/ace/cnn_seq+w2v_ED.pt')
    model_arg.load_state('./pretrained_model/cnn+w2v_EA.pt')


    trainer = Trainer(
        model_event=model_event,
        model_arg=model_arg,
        data=corpus,
Example #20
import time
from configs import get_configs
g = {
    'a': {'b', 'c', 'd'},
    'b': {'a', 'e'},
    'c': {'a', 'e', 'd', 'f'},
    'd': {'a', 'c', 'h'},
    'e': {'b', 'c', 'g'},
    'f': {'c', 'g', 'h'},
    'g': {'e', 'f'},
    'h': {'f', 'd'}
}

depths = [['a'], ['b', 'c', 'd'], ['e', 'f'], ['g', 'h']]

configs = get_configs()


def plot_graph_2():
    win = GraphWin('Graph Plot', configs.win_width, configs.win_height)
    win.setBackground('white')

    edges = [{'a', 'b'}, {'a', 'c'}, {'a', 'd'}, {'b', 'e'}, {'c', 'e'},
             {'c', 'd'}, {'c', 'f'}, {'d', 'h'}, {'e', 'g'}, {'f', 'g'},
             {'f', 'h'}]
    root = 'a'

    adj_list = utils.to_adj_list(edges)
    print(adj_list)
    #g = Graph(adj_list).default(root='a')
    #g = Graph(adj_list).bfs_tree(root='a')
Example #21
from tensorflow.python.platform import gfile
from batch_generator import BatchGenerator
"""
Entry point and main loop for train_net.py. Uses command line arguments to get
model and training specification (see config.py).
"""
configs.DEFINE_string("train_datafile", None, "Training file")
configs.DEFINE_float("lr_decay", 0.9, "Learning rate decay")
configs.DEFINE_float("initial_learning_rate", 1.0, "Initial learning rate")
configs.DEFINE_float("validation_size", 0.0, "Size of validation set as %")
configs.DEFINE_integer("passes", 1, "Passes through day per epoch")
configs.DEFINE_integer("max_epoch", 0, "Stop after max_epochs")
configs.DEFINE_integer("early_stop", None, "Early stop parameter")
configs.DEFINE_integer("seed", None, "Seed for deterministic training")

config = configs.get_configs()

datafile = config.train_datafile if config.train_datafile else config.datafile

train_path = model_utils.get_data_path(config.data_dir, datafile)

cache_path = os.path.splitext(train_path)[0] + '.cache'

print("Loading training data ...")

end_date = config.end_date

############################################################################
#   If cached data doesn't exist, build it
############################################################################
if not os.path.exists(cache_path) or config.use_cache is False:
Example #22
def main(_):
    """
  The model specified by the command line arg --model_dir is applied to every
  data point in --test_datafile and the model output is sent to --output. The
  unix command 'paste' can be used to stitch the input file and output together.
  e.g.,
  $ classifiy_data.py --config=train.conf --test_datafile=test.dat --output=output.dat
  $ paste -d ' ' test.dat output.dat > input_and_output.dat
  """
    configs.DEFINE_string('test_datafile', None, 'file with test data')
    configs.DEFINE_string('output', 'preds.dat', 'file for predictions')
    configs.DEFINE_string('time_field', 'date', 'fields used for dates/time')
    configs.DEFINE_string('print_start', '190001',
                          'only print data on or after')
    configs.DEFINE_string('print_end', '210012',
                          'only print data on or before')
    configs.DEFINE_integer('min_test_k', 1, 'minimum seq length classified')
    configs.DEFINE_integer('num_batches', None, 'num_batches override')

    config = configs.get_configs()

    if config.test_datafile is None:
        config.test_datafile = config.datafile

    batch_size = 1
    data_path = model_utils.get_data_path(config.data_dir,
                                          config.test_datafile)

    print("Loading data %s" % data_path)

    dataset = BatchGenerator(data_path,
                             config,
                             batch_size=batch_size,
                             num_unrollings=config.num_unrollings)

    num_data_points = dataset.num_batches
    if config.num_batches is not None:
        num_data_points = config.num_batches

    print("num_batches = ", num_data_points)

    tf_config = tf.ConfigProto(allow_soft_placement=True,
                               log_device_placement=False)

    with tf.Graph().as_default(), tf.Session(config=tf_config) as session:

        print("Loading model.")

        model = model_utils.get_trained_model(session, config)

        stats = dict()
        key = 'ALL'
        stats[key] = list()

        with open(config.output, "w") as outfile:

            for i in range(num_data_points):

                batch = dataset.next_batch()
                preds = model.step(session, batch)
                seq_len = get_seq_length(batch)
                start = seq_len - 1

                if seq_len < config.num_unrollings:
                    continue
                #if config.nn_type != 'rnn' and seq_len < config.num_unrollings:
                #  continue
                #elif config.nn_type == 'rnn' and classify_entire_seq(batch):
                #  start = config.min_test_k - 1

                for i in range(start, seq_len):
                    key, date = get_key_and_date(batch, i)
                    if (date < config.print_start or date > config.print_end):
                        continue
                    prob = get_pos_prob(config, preds, i)
                    target = get_target(batch, i)
                    outfile.write("%s %s "
                                  "%.4f %.4f %d %d\n" %
                                  (key, date, 1.0 - prob, prob, target, i + 1))
                    pred = +1.0 if prob >= 0.5 else 0.0
                    error = 0.0 if (pred == target) else 1.0
                    tpos = 1.0 if (pred == 1 and target == 1) else 0.0
                    tneg = 1.0 if (pred == 0 and target == 0) else 0.0
                    fpos = 1.0 if (pred == 1 and target == 0) else 0.0
                    fneg = 1.0 if (pred == 0 and target == 1) else 0.0
                    # print("pred=%.2f target=%.2f tp=%d tn=%d fp=%d fn=%d"%(pred,target,tp,tn,fp,fn))
                    curstat = {
                        'error': error,
                        'tpos': tpos,
                        'tneg': tneg,
                        'fpos': fpos,
                        'fneg': fneg
                    }
                    if date not in stats:
                        stats[date] = list()
                    stats[date].append(curstat)
                    stats['ALL'].append(curstat)

        print_summary_stats(stats)
Example #23
        output_hidden_states=False,
    )

    bert_model.cuda()

    parser = argparse.ArgumentParser()
    parser.add_argument('--input_folder',
                        type=str,
                        default="./data/data_ace/arg_data")
    parser.add_argument('--min_word_freq', type=int, default=2)
    parser.add_argument('--batch_size', type=int, default=32)
    parser.add_argument('--wv_file', type=str, default=None)

    corpus = Corpus(parser.parse_args())

    configs = get_configs(corpus, device)
    model_name = "bilstm"
    # model = Model_ED(**configs[model_name])
    model = Model_ED_Bert(**configs[model_name], bert_model=bert_model)
    # model.load_state('/content/drive/My Drive/EE/pretrained_model/cnn_seq+w2v_ED.pt')

    trainer = Trainer(model=model,
                      data=corpus,
                      optimizer_cls=Adam,
                      loss_fn_cls=nn.CrossEntropyLoss,
                      device=device,
                      tokenizer=tokenizer)

    trainer.train_live(20)
    # model_names = ['cnn_seq+w2v']
    # num_epochs = 20
Example #24
def main():
    parser = get_parser()
    print(parser)
    args = parser.parse_args()
    print(args)

    # load data
    opts = get_configs(args.dataset)
    print(opts)
    pos_ratio = torch.FloatTensor(opts["pos_ratio"])
    w_p = (1 - pos_ratio).exp().cuda()
    w_n = pos_ratio.exp().cuda()

    trainset, testset = get_dataset(opts)

    train_loader = torch.utils.data.DataLoader(
        trainset,
        batch_size=args.train_batch_size,
        shuffle=True,
        num_workers=args.train_workers)
    test_loader = torch.utils.data.DataLoader(testset,
                                              batch_size=args.test_batch_size,
                                              shuffle=False,
                                              num_workers=args.test_workers)

    # path to save models
    if not os.path.isdir(args.model_dir):
        print("Make directory: " + args.model_dir)
        os.makedirs(args.model_dir)

    # prefix of saved checkpoint
    model_prefix = args.model_dir + '/' + args.model_prefix

    # define the model: use ResNet50 as an example
    if args.arch == "resnet50":
        from resnet import resnet50
        model = resnet50(pretrained=True, num_labels=opts["num_labels"])
        model_prefix = model_prefix + "_resnet50"
    elif args.arch == "resnet101":
        from resnet import resnet101
        model = resnet101(pretrained=True, num_labels=opts["num_labels"])
        model_prefix = model_prefix + "_resnet101"
    else:
        raise NotImplementedError("To be implemented!")

    if args.start_epoch != 0:
        resume_model = torch.load(args.resume)
        resume_dict = resume_model.state_dict()
        model_dict = model.state_dict()
        resume_dict = {k: v for k, v in resume_dict.items() if k in model_dict}
        model_dict.update(resume_dict)
        model.load_state_dict(model_dict)

    # print(model)
    model.cuda()

    if args.optimizer == 'Adam':
        optimizer = optim.Adam(model.parameters(), lr=args.learning_rate)
    elif args.optimizer == 'SGD':
        optimizer = optim.SGD(model.parameters(),
                              lr=args.learning_rate,
                              momentum=args.momentum,
                              weight_decay=args.weight_decay)
    else:
        raise NotImplementedError("Not supported yet!")

    # training the network
    model.train()

    # attention map size
    w1 = 7
    h1 = 7
    grid_l = generate_flip_grid(w1, h1)

    w2 = 6
    h2 = 6
    grid_s = generate_flip_grid(w2, h2)

    # least common multiple
    lcm = w1 * w2

    criterion = SigmoidCrossEntropyLoss
    criterion_mse = nn.MSELoss(size_average=True)
    for epoch in range(args.start_epoch, args.epoch_max):
        epoch_start = time.clock()
        if not args.stepsize == 0:
            adjust_learning_rate(optimizer, epoch, args)
        for step, batch_data in enumerate(train_loader):
            batch_images_lo = batch_data[0]
            batch_images_lf = batch_data[1]
            batch_images_so = batch_data[2]
            batch_images_sf = batch_data[3]
            batch_labels = batch_data[4]

            batch_labels[batch_labels == -1] = 0

            batch_images_l = torch.cat((batch_images_lo, batch_images_lf))
            batch_images_s = torch.cat((batch_images_so, batch_images_sf))
            batch_labels = torch.cat(
                (batch_labels, batch_labels, batch_labels, batch_labels))

            batch_images_l = batch_images_l.cuda()
            batch_images_s = batch_images_s.cuda()
            batch_labels = batch_labels.cuda()

            inputs_l = Variable(batch_images_l)
            inputs_s = Variable(batch_images_s)
            labels = Variable(batch_labels)

            output_l, hm_l = model(inputs_l)
            output_s, hm_s = model(inputs_s)

            output = torch.cat((output_l, output_s))
            loss = criterion(output, labels, w_p, w_n)

            # flip
            num = hm_l.size(0) // 2

            hm1, hm2 = hm_l.split(num)
            flip_grid_large = grid_l.expand(num, -1, -1, -1)
            flip_grid_large = Variable(flip_grid_large, requires_grad=False)
            flip_grid_large = flip_grid_large.permute(0, 2, 3, 1)
            hm2_flip = F.grid_sample(hm2,
                                     flip_grid_large,
                                     mode='bilinear',
                                     padding_mode='border')
            flip_loss_l = F.mse_loss(hm1, hm2_flip)

            hm1_small, hm2_small = hm_s.split(num)
            flip_grid_small = grid_s.expand(num, -1, -1, -1)
            flip_grid_small = Variable(flip_grid_small, requires_grad=False)
            flip_grid_small = flip_grid_small.permute(0, 2, 3, 1)
            hm2_small_flip = F.grid_sample(hm2_small,
                                           flip_grid_small,
                                           mode='bilinear',
                                           padding_mode='border')
            flip_loss_s = F.mse_loss(hm1_small, hm2_small_flip)

            # scale loss
            num = hm_l.size(0)
            hm_l = F.upsample(hm_l, lcm)
            hm_s = F.upsample(hm_s, lcm)
            scale_loss = F.mse_loss(hm_l, hm_s)

            losses = loss + flip_loss_l + flip_loss_s + scale_loss

            optimizer.zero_grad()
            losses.backward()
            optimizer.step()

            if (step) % args.display == 0:
                print('epoch: {},\ttrain step: {}\tLoss: {:.6f}'.format(
                    epoch + 1, step, losses.data[0]))
                print('\tcls loss: {:.4f};\tflip_loss_l: {:.4f}'
                      '\tflip_loss_s: {:.4f};\tscale_loss: {:.4f}'.format(
                          loss.data[0], flip_loss_l.data[0],
                          flip_loss_s.data[0], scale_loss.data[0]))

        epoch_end = time.clock()
        elapsed = epoch_end - epoch_start
        print("Epoch time: ", elapsed)

        # test
        if (epoch + 1) % args.snapshot == 0:

            model_file = model_prefix + '_epoch{}.pth'
            print("Saving model to " + model_file.format(epoch + 1))
            torch.save(model, model_file.format(epoch + 1))

            if args.test:
                model.eval()
                test_start = time.clock()
                test(model, test_loader, epoch + 1)
                test_time = (time.clock() - test_start)
                print("test time: ", test_time)
                model.train()

    final_model = model_prefix + '_final.pth'
    print("Saving model to " + final_model)
    torch.save(model, final_model)
    model.eval()
    test(model, test_loader, epoch + 1)
Example #25
def main(_):
    """
  """
    configs.DEFINE_string('test_datafile', None, 'file with test data')
    configs.DEFINE_string('output', 'preds.dat', 'file for predictions')
    configs.DEFINE_string('time_field', 'date', 'fields used for dates/time')
    configs.DEFINE_string('print_start', '190001',
                          'only print data on or after')
    configs.DEFINE_string('print_end', '210012',
                          'only print data on or before')
    configs.DEFINE_string('factor_name', None,
                          'Name of factor if nn_type=factor')
    configs.DEFINE_integer('min_test_k', 1, 'minimum seq length classified')
    configs.DEFINE_integer('num_batches', None, 'num_batches override')

    config = configs.get_configs()

    factor_name = config.factor_name
    assert (factor_name is not None)

    if config.test_datafile is None:
        config.test_datafile = config.datafile
    batch_size = 1
    num_unrollings = config.num_unrollings
    data_path = model_utils.get_data_path(config.data_dir,
                                          config.test_datafile)
    filename = data_path

    print("Loading data %s" % data_path)
    if not os.path.isfile(filename):
        raise RuntimeError("The data file %s does not exist" % filename)
    data = pd.read_csv(filename,
                       sep=' ',
                       dtype={
                           config.key_field: str,
                           'date': str
                       })
    if config.end_date is not None:
        data = data.drop(data[data['date'] > str(config.end_date)].index)

    num_data_points = len(data)

    params = dict()

    print("num data points = ", num_data_points)

    stats = dict()
    key = 'ALL'
    stats[key] = list()

    with open(config.output, "w") as outfile:

        last_key = ''
        seq_len = 0

        for i in range(num_data_points):
            key = get_value(data, config.key_field, i)
            date = get_value(data, 'date', i)
            seq_len = seq_len + 1 if key == last_key else 1
            last_key = key
            if (str(date) < config.print_start
                    or str(date) > config.print_end):
                continue
            if seq_len < config.min_test_k:
                continue
            prob = get_value(data, factor_name, i)
            out = get_value(data, config.target_field, i)
            target = (out + 1.0) / 2.0
            k = min(seq_len, config.num_unrollings)
            outfile.write("%s %s "
                          "%.4f %.4f %d %d\n" %
                          (key, date, 1.0 - prob, prob, target, k))
            pred = +1.0 if prob >= 0.5 else 0.0
            error = 0.0 if (pred == target) else 1.0
            tpos = 1.0 if (pred == 1 and target == 1) else 0.0
            tneg = 1.0 if (pred == 0 and target == 0) else 0.0
            fpos = 1.0 if (pred == 1 and target == 0) else 0.0
            fneg = 1.0 if (pred == 0 and target == 1) else 0.0
            # print("pred=%.2f target=%.2f tp=%d tn=%d fp=%d fn=%d"%(pred,target,tp,tn,fp,fn))
            rec = {
                'error': error,
                'tpos': tpos,
                'tneg': tneg,
                'fpos': fpos,
                'fneg': fneg
            }
            if date not in stats:
                stats[date] = list()
            stats[date].append(rec)
            stats['ALL'].append(rec)

    print_summary_stats(stats)
Example #26
def main():

    # params = nni.get_next_parameter()

    parser = argparse.ArgumentParser()
    parser.add_argument('--mode', type=str, required=True)
    parser.add_argument('--config_file', type=str, required=True)
    parser.add_argument('--log', type=bool, default=True)
    parser.add_argument('--logbook', type=str, default='log.txt')
    logger = get_logger()
    parser = get_configs(parser, logger)
    opts = parser.parse_args()

    red = colorama.Fore.RED
    green = colorama.Fore.GREEN
    white = colorama.Fore.WHITE
    cyan = colorama.Fore.CYAN
    reset = colorama.Style.RESET_ALL
    bright = colorama.Style.BRIGHT
    dim = colorama.Style.DIM

    seed = opts.seed
    # seed = params['seed']
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)

    img_path = opts.img_input_dir
    anot_path = opts.anot_input_dir
    output_dir = opts.output_dir

    local_time = time.strftime("%m%d_%H%M%S", time.localtime())
    dir2save = '%s/%s_seed%s/' % (output_dir, local_time, seed)
    gen_num = opts.gen_num
    dir2gen = '%sran_gen/' % (dir2save)

    try:
        os.makedirs(dir2save)
        os.makedirs(dir2gen)
        shutil.copyfile('%s/%s' % (os.getcwd(), opts.config_file), '%s/%s' % (dir2save, opts.config_file))
        shutil.copyfile('%s/my_models.py' % os.getcwd(), '%s/my_models.py' % dir2save)
        shutil.copyfile('%s/run.py' % os.getcwd(), '%s/run.py' % dir2save)
        shutil.copyfile('%s/training_functions.py' % os.getcwd(), '%s/training_functions.py' % dir2save)
        shutil.copyfile('%s/utils_functions.py' % os.getcwd(), '%s/utils_functions.py' % dir2save)
    except OSError:
        raise Exception("Files ERROR!")
    if opts.log:
        logbook = opts.logbook
        logpath = dir2save + logbook
        loghandler = logging.FileHandler(filename=logpath, mode="a", encoding="utf-8")
        loghandler.setLevel(logging.INFO)
        logbook_formatter = LogbookFormatter(fmt="[%(asctime)s] %(message)s", datefmt="%Y-%m-%d %H:%M:%S")
        loghandler.setFormatter(logbook_formatter)
        logger.addHandler(loghandler)

    ### loading weights is not supported at present
    weights2load = 0
    G_weights2load = ''
    D_weights2load = ''
    ###

    Gs = []
    Zs = []
    Ds = []
    reals = []
    masks = []
    noises = []
    NoiseWeight = []

    errD2plot = []
    errG2plot = []

    mode = opts.mode
    channels = opts.channels
    kernel_size = opts.kernel_size
    stride = opts.stride

    if_padding = opts.if_padding
    if_lazy = opts.if_lazy

    G_num_layer = opts.G_num_layer
    D_num_layer = opts.D_num_layer
    if mode == 'f':
        weight4style = opts.weight4style
    scale_base = opts.scale_base
    # scales = opts.scales
    scales = modify_scales(anot_path, scale_base)
    logger.info('-' * 80)
    logger.info(green + '[INFO]: scales are set to %s' % scales + reset)

    out_channels = opts.out_channels

    lr_g = opts.lr_g
    lr_d = opts.lr_d

    iters_list = [int(i) for i in opts.iters_list]
    D_steps = opts.D_steps
    G_steps = opts.G_steps
    lambda_grad = opts.lambda_grad
    n_segments = opts.n_segments
    compactness = opts.compactness
    sigma = opts.sigma
    start_label = opts.start_label

    device = torch.device("cuda:0")

    alpha4rec_ini = opts.alpha4rec
    alpha4cos_ini = opts.alpha4cos
    alpha4vgg_ini = opts.alpha4vgg
    p_loss = VGGPerceptualLoss(resize=False, device=device) if alpha4vgg_ini != 0 else 0
    ###
    # factor4rec = calc_factor('rec', scales)
    # factor4cos = calc_factor('cos', scales)
    # factor4vgg = calc_factor('vgg', scales)
    ###

    noise_weight = opts.noise_weight
    noise_weight_ini = noise_weight

    p4flip = opts.p4flip

    torch.backends.cudnn.benchmark = True
    # amp.register_float_function(torch, 'sigmoid')

    reals, masks = get_reals(mode, img_path, anot_path, scales, scale_base, reals, channels, masks)
    reals, masks = reals[::-1], masks[::-1]

    reals_b, reals_fa, masks_b, masks_f = [], [], [], []
    for _ in reals:
        reals_b.append(_[1])
        reals_fa.append(_[3])
    for _ in masks:
        masks_b.append(_[0])
        masks_f.append(_[1])

    for scale_num in range(scales):
        outfile_dir = '%s%s/' % (dir2save, scale_num)
        try:
            os.makedirs(outfile_dir)
        except OSError:
            raise Exception("Files ERROR!")
        _, __, ___, ____, _____ = reals[scale_num][0], reals[scale_num][1], reals[scale_num][2], reals[scale_num][3], masks_f[scale_num]
        plt.imsave('%s/real_original.png' %  (outfile_dir), convert_image_np(_), vmin=0, vmax=1)
        plt.imsave('%s/real_background.png' %  (outfile_dir), convert_image_np(__), vmin=0, vmax=1)
        plt.imsave('%s/real_foregrounds.png' %  (outfile_dir), convert_image_np(___), vmin=0, vmax=1)
        plt.imsave('%s/real_foreground_a.png' %  (outfile_dir), convert_image_np(____), vmin=0, vmax=1)
        scipy.misc.toimage(convert_image_np(_____[:, 0, :, :][None, :, :, :])).save('%s/mask_f.png' %  (outfile_dir))
    torch.save(reals_fa, dir2save+'reals_f.pth')
    torch.save(reals_b, dir2save+'reals_b.pth')
    torch.save(masks_f, dir2save+'masks_f.pth')
    logger.info('-' * 80)
    logger.info(green + '[INFO]: data prepared!' + reset)
    logger.info('-' * 80)
    torch.cuda.synchronize()
    start_time = time.time()
    logger.info(green + '[INFO]: training starts at %s' % time.strftime("%H:%M:%S", time.localtime()) + reset)
    logger.info('-' * 80)

    for scale_num in range(scales):

        iters = iters_list[scale_num]
        outfile_dir = '%s%s/' % (dir2save, scale_num)
        real_curr = reals[scale_num]
        x = np.random.choice(iters, int(iters*p4flip), replace=False)
        # real_seg = get_seg(real_curr[3], n_segments=n_segments, compactness=compactness, sigma=sigma, start_label=start_label)

        zeros = torch.zeros_like(real_curr[3]).to(device)
        edge_w, edge_h = math.ceil(0.1*real_curr[3].shape[3]), math.ceil(0.1*real_curr[3].shape[2])
        for i in range(edge_w):
            zeros[:,:,:,i] = 1.
        for i in range(real_curr[3].shape[3]-edge_w, real_curr[3].shape[3]):
            zeros[:,:,:,i] = 1.
        for i in range(edge_h):
            zeros[:,:,i,:] = 1.
        for i in range(real_curr[3].shape[2]-edge_h, real_curr[3].shape[2]):
            zeros[:,:,i,:] = 1.
        assert zeros[0,0,0,0] == 1

        if mode == 'f':
            alpha4cos = alpha4cos_ini
            if scale_num >= scales: # 4 5
                alpha4rec = alpha4rec_ini * 10
            else: # 0 1 2 3
                alpha4rec = alpha4rec_ini
            real_curr[3] = real_curr[3].to(device)
            h, w = real_curr[3].shape[2], real_curr[3].shape[3]
            D, G, optimizerD, optimizerG, schedulerD, schedulerG = organise_models(
                mode, device, weights2load, lr_g, lr_d, channels, kernel_size, stride, if_padding,
                G_num_layer, D_num_layer, out_channels, factor=0.01+weight4style*(scales-scale_num-1)/scales
                )
        elif mode == 'b':
            # if scale_num <= 0:
            #     lr_g = 0.0001
            #     lr_d = 0.0001
            alpha4rec = alpha4rec_ini
            alpha4cos = alpha4cos_ini
            real_curr[1] = real_curr[1].to(device)
            h, w = real_curr[1].shape[2], real_curr[1].shape[3]
            D, G, optimizerD, optimizerG, schedulerD, schedulerG = organise_models(
                mode, device, weights2load, lr_g, lr_d, channels, kernel_size, stride, if_padding, G_num_layer, D_num_layer, out_channels
                )

        # [D, G], [optimizerD, optimizerG] = amp.initialize([D, G], [optimizerD, optimizerG], opt_level='O1', num_losses=14)

        # p_loss = 0
        #p_loss = p_loss.to(device)
        r_loss = recon_loss(False)
        r_loss = r_loss.to(device)

        if if_padding:
            padder = make_padder(0)
        else:
            padder = make_padder((G_num_layer-1)*1+2+1)
        # if opts.ani==True:
        #     fpadder = make_padder(0)
        #     h_f = h_f + (1+2+G_num_layer*2)*2
        #     w_f = w_f + (1+2+G_num_layer*2)*2
        noise_1 = padder(Generate_noise([channels, h, w], device=device, if_0=True, if_c_same=False))
        epoch_iterator = create_progressbar(
            iterable=range(iters),
            desc="Training scale [{}/{}]".format(scale_num, scales-1),
            offset=0, leave=True, logging_on_update=False, logging_on_close=True, postfix=True
        )
        for i in epoch_iterator:
            epoch_iterator.set_description('Scale [{}/{}], Iteration [{}/{}]'.format(scale_num+1, scales, i+1, iters))
            if mode == 'f':
                if i >= 1600 and scale_num > 0:
                    alpha4rec = alpha4rec_ini
                styles_ref = []
                _tmp = real_curr[3].squeeze(0).cpu()
                for cnt in range(G_num_layer*2+2):
                    if if_padding:
                        _padder = make_padder(0)
                    else:
                        _padder = make_padder(2+2*3+(G_num_layer-1-cnt)*1)
                    _augment = data_augmenter(_tmp, device=device)
                    _augment_ = _padder(_augment)
                    styles_ref.append(_augment_.detach())
                del _augment, _augment_

            if Gs == []:
                noise_1 = padder(Generate_noise([1, h, w], device=device, if_0=False, if_c_same=True))
                noise_2 = padder(Generate_noise([1, h, w], device=device, if_0=False, if_c_same=True))
                # noise_2_f = padder(get_slerp_interp([1, h_f, w_f], device=device, iters=iters, iter_curr=i, if_c_same=True, start=noise_2_f_s, end=noise_2_f_e))
            else:
                noise_2 = padder(Generate_noise([channels, h, w], device=device, if_0=False, if_c_same=False))
                # noise_2_f = padder(get_slerp_interp([channels, h_f, w_f], device=device, iters=iters, iter_curr=i, if_c_same=False, start=noise_2_f_s, end=noise_2_f_e))

            for j in range(D_steps):
                if (j == 0) & (i == 0):
                    if Gs == []:
                        noise_3 = padder(Generate_noise([channels, h, w], device=device, if_0=True, if_c_same=False))
                        prev = torch.full([1, channels, h, w], 0, device=device)
                        _ = prev
                        prev = padder(prev)
                        noise_weight = 1
                    else:
                        criterion = nn.MSELoss()
                        if mode == 'f':
                            prev = padder(draw_concat(Gs, Zs, reals_fa, NoiseWeight, _, 'rand', kernel_size, channels, device, padder, G_num_layer, mode))
                            noise_3 = draw_concat(Gs, Zs, reals_fa, NoiseWeight, _, 'rec', kernel_size, channels, device, padder, G_num_layer, mode)
                            RMSE = torch.sqrt(criterion(real_curr[3], noise_3))
                        elif mode == 'b':
                            prev = padder(draw_concat(Gs, Zs, reals_b, NoiseWeight, _, 'rand', kernel_size, channels, device, padder, G_num_layer, mode))
                            noise_3 = draw_concat(Gs, Zs, reals_b, NoiseWeight, _, 'rec', kernel_size, channels, device, padder, G_num_layer, mode)
                            RMSE = torch.sqrt(criterion(real_curr[1], noise_3))
                        noise_weight = noise_weight_ini*RMSE
                        noise_3 = padder(noise_3)
                else:
                    if mode == 'f':
                        prev = padder(draw_concat(Gs, Zs, reals_fa, NoiseWeight, _, 'rand', kernel_size, channels, device, padder, G_num_layer, mode))
                    elif mode == 'b':
                        prev = padder(draw_concat(Gs, Zs, reals_b, NoiseWeight, _, 'rand', kernel_size, channels, device, padder, G_num_layer, mode))
                if Gs == []:
                    noise = noise_2
                else:
                    noise = noise_weight * noise_2 + prev
                D.zero_grad()
                if mode == 'f':
                    output = D(real_curr[3])
                elif mode == 'b':
                    output = D(real_curr[1])
                if i in x:
                    errD_real = output.mean()
                else:
                    errD_real = -output.mean()
                # with amp.scale_loss(errD_real, optimizerD, loss_id=0) as errD_real:
                #     errD_real.backward(retain_graph=True)
                errD_real.backward(retain_graph=True)
                if i in x:
                    errD_real = -errD_real
                if mode == 'f':
                    fake = G(noise.detach(), styles_ref, prev)
                elif mode == 'b':
                    fake = G(noise.detach(), prev)
                output = D(fake.detach())
                if i in x:
                    errD_fake = -output.mean()
                else:
                    errD_fake = output.mean()
                # with amp.scale_loss(errD_fake, optimizerD, loss_id=1) as errD_fake:
                #     errD_fake.backward(retain_graph=True)
                errD_fake.backward(retain_graph=True)
                if i in x:
                    errD_fake = -errD_fake

                if mode == 'f':
                    gradient_penalty = calc_gradient_penalty(D, real_curr[3], fake, lambda_grad, device)
                elif mode == 'b':
                    gradient_penalty = calc_gradient_penalty(D, real_curr[1], fake, lambda_grad, device)
                # with amp.scale_loss(gradient_penalty, optimizerD, loss_id=2) as gradient_penalty:
                #     gradient_penalty.backward()
                gradient_penalty.backward()

                optimizerD.step()
                D.zero_grad()
                optimizerD.zero_grad()

            _errD_real = errD_real.item()
            _errD_fake = errD_fake.item()
            _gradient_penalty = gradient_penalty.item()
            del errD_real, errD_fake, gradient_penalty
            _errD = _errD_real + _errD_fake + _gradient_penalty
            errD2plot.append([_errD_real, _errD_fake, _gradient_penalty])
            schedulerD.step(_errD)
            for j in range(G_steps):
                G.zero_grad()
                ###
                output = D(fake)
                ###
                errG = -output.mean()
                # with amp.scale_loss(errG, optimizerG, loss_id=3) as errG:
                #     errG.backward(retain_graph=True)
                errG.backward(retain_graph=True)
                Z_opt = noise_weight * noise_1 + noise_3
                if mode == 'f':
                    _tmp = G(Z_opt.detach(), styles_ref, noise_3)
                elif mode == 'b':
                    _tmp = G(Z_opt.detach(), noise_3)

                if alpha4rec != 0:
                    # loss = r_loss
                    loss = nn.L1Loss()
                    Z_opt = noise_weight * noise_1 + noise_3

                    if mode == 'f':
                        _loss = loss(_tmp*zeros, real_curr[3]*zeros)
                        # _loss = calc_local_rec(loss, _tmp, real_seg)
                    elif mode == 'b':
                        _loss = loss(_tmp, real_curr[1])
                    rec_loss = alpha4rec * _loss
                    del _loss
                    # with amp.scale_loss(rec_loss, optimizerG, loss_id=4) as rec_loss:
                    #     rec_loss.backward(retain_graph=True)
                    rec_loss.backward(retain_graph=True)
                    rec_loss = rec_loss.detach()
                else:
                    Z_opt = noise_1
                    rec_loss = torch.Tensor([0])
                if alpha4cos != 0:
                    loss = nn.CosineEmbeddingLoss()
                    Z_opt = noise_weight * noise_1 + noise_3
                    if mode == 'f':
                        _loss = loss(_tmp, real_curr[3], torch.ones_like(real_curr[3]))
                    elif mode == 'b':
                        _loss = loss(_tmp, real_curr[1], torch.ones_like(real_curr[1]))
                    cos_loss = alpha4cos * _loss
                    del _loss
                    # with amp.scale_loss(cos_loss, optimizerG, loss_id=5) as cos_loss:
                    #     cos_loss.backward(retain_graph=True)
                    cos_loss.backward(retain_graph=True)
                    cos_loss = cos_loss.detach()
                else:
                    Z_opt = noise_1
                    cos_loss = torch.Tensor([0])
                if alpha4vgg_ini != 0:
                    loss = p_loss
                    Z_opt = noise_weight * noise_1 + noise_3
                    if mode == 'f':
                        # _loss = alpha4vgg_ini * loss(_tmp, real_curr[3], device)
                        _loss = loss(_tmp, real_curr[3], device)
                    elif mode == 'b':
                        _loss = alpha4vgg_ini * loss(_tmp, real_curr[1], device)
                    perceptual_loss = _loss
                    # perceptual_loss1 = _loss1
                    # perceptual_loss2 = _loss2
                    del _loss
                    # perceptual_loss = factor4vgg[scale_num] * alpha4vgg * p_loss(G(Z_opt.detach(), styles_ref, noise_3), real_curr[3], device)
                    # perceptual_loss = factor4vgg[scale_num] * alpha4vgg * p_loss(G(Z_opt.detach(), noise_3), real_curr[1], device)
                    # with amp.scale_loss(perceptual_loss_f, optimizerG_f, loss_id=6) as perceptual_loss_f:
                    #     perceptual_loss_f.backward(retain_graph=True)
                    # with amp.scale_loss(perceptual_loss, optimizerG, loss_id=5) as perceptual_loss:
                    #     perceptual_loss.backward(retain_graph=True)

                    # perceptual_loss1.backward(retain_graph=True)
                    perceptual_loss.backward(retain_graph=True)
                    # perceptual_loss = perceptual_loss1.detach() + perceptual_loss2.detach()
                    perceptual_loss = perceptual_loss.detach()
                else:
                    Z_opt = noise_1
                    perceptual_loss = torch.Tensor([0])
                optimizerG.step()
                G.zero_grad()
                optimizerG.zero_grad()
            _errG = errG.item()
            _rec_loss = rec_loss.item()
            _cos_loss = cos_loss.item()
            _perceptual_loss = perceptual_loss.item()
            del errG, rec_loss, cos_loss, perceptual_loss
            errG2plot.append([_errG, _rec_loss, _cos_loss, _perceptual_loss])
            _errG = _errG + _rec_loss + _cos_loss + _perceptual_loss
            schedulerG.step(_errG)
            del noise_2
            if i % 200 == 0 or i == (iters-1):
                if mode == 'b':
                    _fake = fake.cpu()
                    _fake = _fake * masks_b[scale_num]
                    _fake = _fake + masks_b[scale_num] - torch.ones_like(masks_b[scale_num])
                    plt.imsave('%s/fake_%s_%s.png' %  (outfile_dir, mode, str(i)), convert_image_np(_fake.detach()), vmin=0, vmax=1)
                elif mode == 'f':
                    plt.imsave('%s/fake_%s_%s.png' %  (outfile_dir, mode, str(i)), convert_image_np(fake.detach()), vmin=0, vmax=1)
            if i % 500 == 0 or i == (iters-1):
                plot_sinloss(errG2plot, errD2plot, scale_num, iters_list, outfile_dir, mode, i)
        epoch_iterator.close()
        torch.save(G.state_dict(), '%s/G_%s.pth' % (outfile_dir, mode))
        torch.save(D.state_dict(), '%s/D_%s.pth' % (outfile_dir, mode))
        G = reset_grads(G, False)
        G.eval()
        D = reset_grads(D, False)
        D.eval()
        Gs.append(G)
        Ds.append(D)
        NoiseWeight.append(noise_weight)
        Zs.append(noise_1)
        # torch.save(Gs, '%s/Gs.pth' % (dir2save))
        torch.save(Zs, '%s/Zs.pth' % (dir2save))
        torch.save(NoiseWeight, '%s/noiseweight_%s.pth' % (dir2save, mode))
        del D, G
    torch.cuda.synchronize()
    end_time = time.time()
    logger.info('-' * 80)
    logger.info(green + '[INFO]: training time cost : %s' % seconds2time(end_time - start_time) + reset)
    logger.info('-' * 80)
    logger.info(green + '[INFO]: randomly generating %s samples...' %(opts.gen_num) + reset)
    logger.info('-' * 80)
    if mode == 'f':
        ran_gen(Gs, Zs, NoiseWeight, reals_fa, opts, dir2gen, padder)
    elif mode == 'b':
        ran_gen(Gs, Zs, NoiseWeight, reals_b, opts, dir2gen, padder)
    logger.info('-' * 80)
    logger.info(green + '[INFO]: calculating eval metrics...' + reset)
    logger.info('-' * 80)
    sifid = calculate_sifid_given_paths(dir2gen+'real.png', dir2gen, batch_size=1, dims=64, suffix='png')
    diversity = calculate_cs(dir2gen, suffix='png')
    logger.info(green + '[INFO]: SIFID : %6f   DIVERSITY : %6f   GQI : %6f ' % (sifid, diversity, diversity/sifid)+ reset)
Example #27
def main(_):
  """
  Entry point and main loop for train_net.py. Uses command line arguments to get
  model and training specification (see config.py).
  """
  configs.DEFINE_string("train_datafile", None,"Training file")
  configs.DEFINE_string("optimizer", 'gd', 'Optimizer to use gd, adam, adagrad, momentum')
  configs.DEFINE_float("lr_decay",0.9, "Learning rate decay")
  configs.DEFINE_float("initial_learning_rate",1.0,"Initial learning rate")
  configs.DEFINE_float("validation_size",0.0,"Size of validation set as %")
  configs.DEFINE_float("passes",1.0,"Passes through day per epoch")
  configs.DEFINE_float("rnn_loss_weight",None,"How much moret to weight kth example")
  configs.DEFINE_integer("max_epoch",0,"Stop after max_epochs")
  configs.DEFINE_integer("early_stop",None,"Early stop parameter")
  configs.DEFINE_integer("seed",None,"Seed for deterministic training")

  config = configs.get_configs()

  if config.train_datafile is None:
     config.train_datafile = config.datafile

  train_path = model_utils.get_data_path(config.data_dir,config.train_datafile)

  print("Loading training data ...")

  train_data = BatchGenerator(train_path, config,
                              config.batch_size, config.num_unrollings,
                              validation_size=config.validation_size,
                              randomly_sample=True)

  tf_config = tf.ConfigProto( allow_soft_placement=True  ,
                              log_device_placement=False )

  with tf.Graph().as_default(), tf.Session(config=tf_config) as session:

    if config.seed is not None:
      tf.set_random_seed(config.seed)

    print("Constructing model ...")

    model = model_utils.get_training_model(session, config, verbose=True)


    if config.early_stop is not None:
      print("Training will early stop without "
        "improvement after %d epochs."%config.early_stop)
    
    train_history = list()
    valid_history = list()
    # This sets the initial learning rate tensor
    lr = model.assign_lr(session,config.initial_learning_rate)

    for i in range(config.max_epoch):

      trc, tre, vdc, vde = run_epoch(session, model, train_data,
                                       keep_prob=config.keep_prob,
                                       passes=config.passes,
                                       verbose=True)

      trc = 999.0 if trc > 999.0 else trc
      vdc = 999.0 if vdc > 999.0 else vdc

      print( ('Epoch: %d loss: %.6f %.6f'
              ' error: %.6f %.6f Learning rate: %.4f') %
            (i + 1, trc, vdc, tre, vde, lr) )
      sys.stdout.flush()

      train_history.append( trc )
      valid_history.append( vdc )

      # update learning rate 
      if config.optimizer == 'gd' or config.optimizer == 'momentum':
        lr = model_utils.adjust_learning_rate(session, model, 
                                              lr, config.lr_decay, train_history )

      if not os.path.exists(config.model_dir):
        print("Creating directory %s" % config.model_dir)
        os.mkdir(config.model_dir)

      chkpt_file_prefix = "training.ckpt"
      if model_utils.stop_training(config,valid_history,chkpt_file_prefix):
        print("Training stopped.")
        quit()
      else:
        checkpoint_path = os.path.join(config.model_dir, chkpt_file_prefix)
        tf.train.Saver().save(session, checkpoint_path, global_step=i)
Example #28
import redis, os, flask, numpy
import planning, configs

def_conf = configs.get_configs('configs/base')
app_conf = configs.get_configs('configs')

app = configs.create_app(app_conf['SECRET_KEY'])
data = redis.Redis(host='redis', port=6379)


@app.route('/dim')
@app.route('/')
def dim():
    global app_conf, def_conf

    return flask.render_template('dim.html', **def_conf)


@app.route('/last')
def last():
    global def_conf, data

    request = data.hgetall('compute')

    if not request:
        return flask.render_template('last.html', height=None, **def_conf)

    return flask.render_template(
        'last.html',
        height=int(request[b'height']),
        width=int(request[b'width']),
Example #29
def main(_):
  """
  """
  configs.DEFINE_string('test_datafile',None,'file with test data')
  configs.DEFINE_string('output','preds.dat','file for predictions')
  configs.DEFINE_string('time_field','date','fields used for dates/time')
  configs.DEFINE_string('print_start','190001','only print data on or after')
  configs.DEFINE_string('print_end','210012','only print data on or before')
  configs.DEFINE_string('factor_name',None,'Name of factor if nn_type=factor')
  configs.DEFINE_integer('min_test_k',1,'minimum seq length classified')
  configs.DEFINE_integer('num_batches',None,'num_batches override')

  config = configs.get_configs()

  factor_name = config.factor_name
  assert(factor_name is not None)
  
  if config.test_datafile is None:
    config.test_datafile = config.datafile
  batch_size = 1
  num_unrollings = config.num_unrollings
  data_path = model_utils.get_data_path(config.data_dir,config.test_datafile)
  filename=data_path
  
  print("Loading data %s"%data_path)
  if not os.path.isfile(filename):
    raise RuntimeError("The data file %s does not exist" % filename)
  data = pd.read_csv(filename, sep=' ',
                     dtype={config.key_field: str, 'date': str})
  if config.end_date is not None:
    data = data.drop(data[data['date'] > str(config.end_date)].index)

  num_data_points = len(data)

  params = dict()  
     
  print("num data points = ", num_data_points)

  stats = dict()
  key   = 'ALL'
  stats[key] = list()

  with open(config.output, "w") as outfile:

    last_key = ''
    seq_len = 0
    
    for i in range(num_data_points):
      key = get_value( data, config.key_field, i )
      date = get_value( data, 'date', i )
      seq_len = seq_len + 1 if key == last_key else 1
      last_key = key
      if (str(date) < config.print_start or str(date) > config.print_end):
        continue
      if seq_len < config.min_test_k:
        continue
      prob = get_value(data, factor_name, i )
      out = get_value(data, config.target_field, i )
      target = (out+1.0)/2.0
      k = min(seq_len,config.num_unrollings)
      outfile.write("%s %s "
        "%.4f %.4f %d %d\n" % (key, date, 1.0 - prob, prob, target, k) )
      pred   = +1.0 if prob >= 0.5 else 0.0
      error = 0.0 if (pred == target) else 1.0
      tpos = 1.0 if (pred==1 and target==1) else 0.0
      tneg = 1.0 if (pred==0 and target==0) else 0.0
      fpos = 1.0 if (pred==1 and target==0) else 0.0
      fneg = 1.0 if (pred==0 and target==1) else 0.0
      # print("pred=%.2f target=%.2f tp=%d tn=%d fp=%d fn=%d"%(pred,target,tp,tn,fp,fn))
      rec = {
        'error': error,
        'tpos' : tpos,
        'tneg' : tneg,
        'fpos' : fpos,
        'fneg' : fneg }
      if date not in stats:
        stats[date] = list()
      stats[date].append(rec)
      stats['ALL'].append(rec)

  print_summary_stats(stats)
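
print_summary_stats is defined elsewhere in this project; below is a minimal sketch of what it could compute from the records collected above, assuming it only aggregates the error/tpos/tneg/fpos/fneg fields (an illustration, not the original implementation):

def print_summary_stats(stats):
    # For each date bucket (plus the 'ALL' bucket), aggregate the per-example
    # counts collected above into error rate, precision, and recall.
    for key in sorted(stats):
        recs = stats[key]
        n = len(recs)
        if n == 0:
            continue
        err = sum(r['error'] for r in recs) / n
        tp = sum(r['tpos'] for r in recs)
        fp = sum(r['fpos'] for r in recs)
        fn = sum(r['fneg'] for r in recs)
        prec = tp / (tp + fp) if (tp + fp) > 0 else 0.0
        rec = tp / (tp + fn) if (tp + fn) > 0 else 0.0
        print("%s n=%d error=%.4f precision=%.4f recall=%.4f" %
              (key, n, err, prec, rec))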
Example #30
0
import os
from configs import get_configs
from .dict_utils import extract_dict_fields

loaded_json = get_configs()

dataset_file = (
    extract_dict_fields(loaded_json, ['dataset_file']))


def get_dataset_file_path():
    return f'{os.getcwd()}/{dataset_file}'


def file_exists(file_path):
    return os.path.exists(file_path)
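
A brief usage sketch for the two helpers above; the error raised on a missing file is illustrative, not part of the original module:

# Resolve the configured dataset file relative to the current working
# directory and verify it exists before trying to load it.
dataset_path = get_dataset_file_path()
if not file_exists(dataset_path):
    raise FileNotFoundError('dataset file not found: %s' % dataset_path)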
Example #31
0
    Returns:
      If DNN_QUANT_ROOT is defined, the fully qualified data path is returned
      Otherwise a path relative to the working directory is returned
    """
    path = os.path.join(data_dir, filename)
    if data_dir != '.' and 'DNN_QUANT_ROOT' in os.environ:
        path = os.path.join(os.environ['DNN_QUANT_ROOT'], path)
    return path
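
As a worked example of the resolution logic above (the paths and environment value are hypothetical):

import os

# Hypothetical environment, for illustration only.
os.environ['DNN_QUANT_ROOT'] = '/home/user/dnn-quant'
print(get_data_path('datasets', 'test.dat'))  # /home/user/dnn-quant/datasets/test.dat
print(get_data_path('.', 'test.dat'))         # ./test.dat (the root is not applied when data_dir is '.')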

configs.DEFINE_string("train_datafile", None,"Training file")
configs.DEFINE_float("validation_size",0.0,"Size of validation set as %")
configs.DEFINE_integer("seed",None,"Seed for deterministic training")
configs.DEFINE_float("rnn_loss_weight",None,"How much moret to weight kth example")
config = configs.get_configs()

if config.train_datafile is None:
    config.train_datafile = config.datafile

train_path = get_data_path(config.data_dir,config.train_datafile)

print("Loading batched data ...")

batches = BatchGenerator(train_path, config,
                         config.batch_size,config.num_unrollings,
                         validation_size=config.validation_size,
                         randomly_sample=True)


for i in range(10):
Example #32
0
import os
import sys
import subprocess

from flask import Flask, request

sys.path.append(os.path.join(os.path.abspath(__file__).rsplit('/', 1)[0], 'logger'))

from configs import get_configs
from mylogger import Logger

log_main = Logger.get_logger(__file__)


app = Flask(__name__)

configs_sys = get_configs()         # system configuration


# Route
@app.route('/deploy/<project>', methods=['POST'])
def deploy(project=None):
    if project.upper() not in configs_sys['GIT']:
        log_main.critical('No such project: {0}'.format(project))
        sys.exit(-1)

    html_url = request.json['repository']['html_url']
    local_url = configs_sys['GIT'][project.upper()]['URL']

    if html_url != local_url:
        log_main.critical('Project {0} does not match {1} from github'.format(local_url, html_url))
        sys.exit(-1)
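
The handler above only relies on a few keys from get_configs(); a hypothetical shape of that configuration, inferred from the lookups in the code (values are placeholders, not from the original project):

# Hypothetical configuration layout; only the keys read by deploy() are shown.
example_configs = {
    'GIT': {
        'MYPROJECT': {
            'URL': 'https://github.com/example/myproject',
        },
    },
}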
Example #33
0
import sys
import torch
from torch import nn

import argparse

DEVICE = 'cuda' if torch.cuda.is_available() else 'cpu'

if __name__ == '__main__':

    parser = argparse.ArgumentParser('nn models for inverse design')
    parser.add_argument('--model', type=str, default='inn')
    args = parser.parse_args()

    train_loader, val_loader, test_loader = get_dataloaders(args.model)
    configs = get_configs(args.model)

    if args.model in ['forward_model', 'inverse_model']:
        model = MLP(configs['input_dim'], configs['output_dim']).to(DEVICE)
        optimizer = torch.optim.Adam(model.parameters(), lr=configs['learning_rate'], weight_decay=configs['weight_decay'])
    
    elif args.model in ['tandem_net']:

        forward_model = MLP(4, 3).to(DEVICE)
        forward_model.load_state_dict(torch.load('./models/forward_model.pth')['model_state_dict'])
        inverse_model = MLP(3, 4).to(DEVICE)
        inverse_model.load_state_dict(torch.load('./models/inverse_model.pth')['model_state_dict'])
        model = TandemNet(forward_model, inverse_model)
        optimizer = torch.optim.Adam(model.inverse_model.parameters(), lr=configs['learning_rate'], weight_decay=configs['weight_decay'])
        
    elif args.model in ['vae']:
Example #34
0
def main(_):
    """
  Entry point and main loop for train_net.py. Uses command line arguments to get
  model and training specification (see config.py).
  """
    configs.DEFINE_string("train_datafile", None, "Training file")
    configs.DEFINE_string("optimizer", 'gd',
                          'Optimizer to use gd, adam, adagrad, momentum')
    configs.DEFINE_float("lr_decay", 0.9, "Learning rate decay")
    configs.DEFINE_float("initial_learning_rate", 1.0, "Initial learning rate")
    configs.DEFINE_float("validation_size", 0.0, "Size of validation set as %")
    configs.DEFINE_float("passes", 1.0, "Passes through day per epoch")
    configs.DEFINE_float("rnn_loss_weight", None,
                         "How much moret to weight kth example")
    configs.DEFINE_integer("max_epoch", 0, "Stop after max_epochs")
    configs.DEFINE_integer("early_stop", None, "Early stop parameter")
    configs.DEFINE_integer("seed", None, "Seed for deterministic training")

    config = configs.get_configs()

    if config.train_datafile is None:
        config.train_datafile = config.datafile

    train_path = model_utils.get_data_path(config.data_dir,
                                           config.train_datafile)

    print("Loading training data ...")

    train_data = BatchGenerator(train_path,
                                config,
                                config.batch_size,
                                config.num_unrollings,
                                validation_size=config.validation_size,
                                randomly_sample=True)

    tf_config = tf.ConfigProto(allow_soft_placement=True,
                               log_device_placement=False)

    with tf.Graph().as_default(), tf.Session(config=tf_config) as session:

        if config.seed is not None:
            tf.set_random_seed(config.seed)

        print("Constructing model ...")

        model = model_utils.get_training_model(session, config, verbose=True)

        if config.early_stop is not None:
            print("Training will early stop without "
                  "improvement after %d epochs." % config.early_stop)

        train_history = list()
        valid_history = list()
        # This sets the initial learning rate tensor
        lr = model.assign_lr(session, config.initial_learning_rate)

        for i in range(config.max_epoch):

            trc, tre, vdc, vde = run_epoch(session,
                                           model,
                                           train_data,
                                           keep_prob=config.keep_prob,
                                           passes=config.passes,
                                           verbose=True)

            trc = 999.0 if trc > 999.0 else trc
            vdc = 999.0 if vdc > 999.0 else vdc

            print(('Epoch: %d loss: %.6f %.6f'
                   ' error: %.6f %.6f Learning rate: %.4f') %
                  (i + 1, trc, vdc, tre, vde, lr))
            sys.stdout.flush()

            train_history.append(trc)
            valid_history.append(vdc)

            # update learning rate
            if config.optimizer == 'gd' or config.optimizer == 'momentum':
                lr = model_utils.adjust_learning_rate(session, model, lr,
                                                      config.lr_decay,
                                                      train_history)

            if not os.path.exists(config.model_dir):
                print("Creating directory %s" % config.model_dir)
                os.mkdir(config.model_dir)

            chkpt_file_prefix = "training.ckpt"
            if model_utils.stop_training(config, valid_history,
                                         chkpt_file_prefix):
                print("Training stopped.")
                quit()
            else:
                checkpoint_path = os.path.join(config.model_dir,
                                               chkpt_file_prefix)
                tf.train.Saver().save(session, checkpoint_path, global_step=i)
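
The learning-rate helper lives in model_utils and is not shown in these examples; purely as a hedged sketch of the kind of decay heuristic adjust_learning_rate might apply (an assumption, not the project's actual code):

def adjust_learning_rate(session, model, learning_rate, lr_decay, train_history):
    # Assumed heuristic (not the project's actual implementation): decay the
    # learning rate whenever the most recent training loss fails to improve
    # on the best loss seen so far, then push the new value into the model.
    if len(train_history) >= 2 and train_history[-1] > min(train_history[:-1]):
        learning_rate *= lr_decay
    return model.assign_lr(session, learning_rate)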