Example #1
parser.add_argument('--crop_height', type=int, default=512, help='Height of cropped input image to network')
parser.add_argument('--crop_width', type=int, default=512, help='Width of cropped input image to network')
parser.add_argument('--clip_size', type=int, default=450, help='Size of the clipped input image to network')
parser.add_argument('--num_epochs', type=int, default=80, help='Number of epochs to train for')
# Note: argparse treats type=bool as truthy for any non-empty string, so
# these switches are effectively set by their defaults.
parser.add_argument('--h_flip', type=bool, default=True, help='Whether to randomly flip the image horizontally for data augmentation')
parser.add_argument('--v_flip', type=bool, default=True, help='Whether to randomly flip the image vertically for data augmentation')
parser.add_argument('--color', type=bool, default=True, help='Whether to randomly jitter the image color for data augmentation')
parser.add_argument('--rotation', type=bool, default=True, help='Whether to randomly rotate the image for data augmentation')
parser.add_argument('--start_valid', type=int, default=20, help='Epoch at which to start validation')
parser.add_argument('--valid_step', type=int, default=1, help='Number of epochs between validations')
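
# As a hedged illustration (not part of the original script), boolean flags
# like these typically gate augmentation in the input pipeline; the `augment`
# helper below and its numpy-based flips are assumptions, not the snippet's code.
import random

import numpy as np

def augment(image, label, args):
    # Hypothetical helper: applies the augmentations toggled by the flags above.
    if args.h_flip and random.random() < 0.5:
        image, label = np.fliplr(image), np.fliplr(label)
    if args.v_flip and random.random() < 0.5:
        image, label = np.flipud(image), np.flipud(label)
    return image, label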


args = parser.parse_args()
num_images = []
train_img, train_label, valid_img, valid_lab = prepare_data()
num_batches = len(train_img) // args.batch_size

img = tf.placeholder(tf.float32, [None, args.crop_height, args.crop_width, 3])
is_training = tf.placeholder(tf.bool)
label = tf.placeholder(tf.float32, [None, args.crop_height, args.crop_width, 1])

pred = mapnet(img, is_training)
pred1 = tf.nn.sigmoid(pred)

update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
with tf.control_dependencies(update_ops):
    sig = tf.nn.sigmoid_cross_entropy_with_logits(labels=label, logits=pred)
    sigmoid_cross_entropy_loss = tf.reduce_mean(sig)
    train_step = tf.train.AdamOptimizer(args.learning_rate).minimize(sigmoid_cross_entropy_loss)
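
# The snippet ends before the training loop. A minimal hedged sketch of how
# these placeholders and train_step would be driven in TF1 follows; the
# load_batch helper is hypothetical, and args.batch_size / args.learning_rate
# are assumed to be defined in the truncated part of the parser.
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for epoch in range(args.num_epochs):
        for step in range(num_batches):
            # load_batch: hypothetical helper that crops/augments one batch
            batch_img, batch_lab = load_batch(train_img, train_label, step, args)
            _, loss = sess.run([train_step, sigmoid_cross_entropy_loss],
                               feed_dict={img: batch_img,
                                          label: batch_lab,
                                          is_training: True})
        print('epoch %d, loss %.4f' % (epoch, loss))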
Example #2

# In[4]:

img_rows = img_cols = 150
out_put_classes = 7
img_chs = 3

# In[5]:

prepare_data(
    img_rows=img_rows,
    img_cols=img_cols,
    img_chs=img_chs,
    out_put_classes=out_put_classes,
    img_src='/home/achbogga/CKplus/dataset/processed_alinet_new',
    label_src='/home/achbogga/CKplus/dataset/labels/emotion_labels.txt',
    asGray=False,
    face_detection_xml=
    "/home/achbogga/opencv2_data/haarcascades/haarcascade_frontalface_default.xml"
)

# In[6]:


def create_alinet(weights_path=None,
                  img_chs=img_chs,
                  img_rows=img_rows,
                  img_cols=img_cols,
                  nb_output_classes=out_put_classes,
                  drop_out_rate=0.4):
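    # The original snippet is truncated at the signature. The body below is a
    # hedged, minimal Keras sketch (assumes `from keras.models import Sequential`
    # and `from keras.layers import Conv2D, MaxPooling2D, Flatten, Dropout, Dense`);
    # it is NOT the actual AliNet architecture.
    model = Sequential()
    model.add(Conv2D(32, (3, 3), activation='relu',
                     input_shape=(img_rows, img_cols, img_chs)))
    model.add(MaxPooling2D((2, 2)))
    model.add(Flatten())
    model.add(Dropout(drop_out_rate))
    model.add(Dense(nb_output_classes, activation='softmax'))
    if weights_path is not None:
        model.load_weights(weights_path)
    return model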
Example #3
    if type == "de":
        indexes = [SOS_token] + indexes
    return torch.tensor(indexes, dtype=torch.long,
                        requires_grad=False).unsqueeze(0)


def tensors_from_pair(input_lang, output_lang, pair, max_length_en,
                      max_length_de):
    input_tensor = tensor_from_sentence("en", input_lang, pair[0],
                                        max_length_en)
    target_tensor = tensor_from_sentence("de", output_lang, pair[1],
                                         max_length_de)
    return input_tensor, target_tensor


input_lang, output_lang, _ = prepare_data(lang1, lang2, 40)

d_model = 128
heads = 8
N = 6
src_vocab = input_lang.n_words
trg_vocab = output_lang.n_words
en_weight_matrix = Embedder.initial_weights_matrix(
    "word_vector/glove.6B.300d.txt", input_lang, 300)
# Decoder-side embeddings come from the target language's vocabulary.
de_weight_matrix = Embedder.initial_weights_matrix(
    "word_vector/vn_word2vec_300d.txt", output_lang, 300)

model = Transformer(src_vocab, trg_vocab, d_model, N, heads, device,
                    en_weight_matrix, de_weight_matrix)
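
# The example stops after constructing the model. A hedged sketch of the usual
# continuation (the hyperparameters and the padding index are assumptions):
import torch.nn as nn
import torch.optim as optim

model = model.to(device)
criterion = nn.CrossEntropyLoss(ignore_index=0)  # assumes index 0 is the pad token
optimizer = optim.Adam(model.parameters(), lr=1e-4, betas=(0.9, 0.98), eps=1e-9)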
Example #4
# Head reconstructed from context: `idx2vec` maps vocabulary indices to their
# pretrained vectors (only for words present in word2idx).
idx2vec = {
    word2idx[word]: vec
    for word, vec in word2vec.items() if word in word2idx
}
unk_embedding = np.random.multivariate_normal(np.zeros(embedding_size),
                                              np.eye(embedding_size))
emb_mat = np.array([
    idx2vec[idx] if idx in idx2vec else unk_embedding
    for idx in range(vocab_size)
])
print("emb_mat:", emb_mat.shape)

word_embeddings = tf.constant(emb_mat, dtype=tf.float32)
label_embeddings = tf.get_variable(name="embeddings",
                                   shape=[n_classes, embedding_size],
                                   dtype=tf.float32)
x_input, x_mask_input, x_len, y_seqs, y_decode, y_len, y_mask_input, train_size = prepare_data(
    data_type="train", word2idx=word2idx, test_true_label=True)
t_x_input, t_x_mask_input, t_x_len, t_label_seq, t_y_decode, t_y_len, t_y_mask_input, test_size = prepare_data(
    data_type="test", word2idx=word2idx, test_true_label=True)
# train_batch_size, test_batch_size = train_size, test_size
train_batch_size, test_batch_size = 60, 60
train_dataset = tf.data.Dataset.from_tensor_slices(
    (x_input, x_mask_input, x_len, y_seqs, y_decode, y_len, y_mask_input))
train_dataset = train_dataset.shuffle(
    buffer_size=1000).repeat(train_epoch).batch(train_batch_size)
test_dataset = tf.data.Dataset.from_tensor_slices(
    (t_x_input, t_x_mask_input, t_x_len, t_label_seq, t_y_decode, t_y_len,
     t_y_mask_input))
test_dataset = test_dataset.batch(test_batch_size)

# check
train_iter = train_dataset.make_one_shot_iterator().get_next()
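
# Hedged check (not in the original snippet): consume one batch from the
# one-shot iterator inside a TF1 session and inspect the shapes.
with tf.Session() as sess:
    batch = sess.run(train_iter)
    print([b.shape for b in batch])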
Example #5
import random

import numpy as np

# EMU info random generation
def emu_order(b):
    return b[1]


def emu_info(num):
    # Build a (num, 3) table: column 0 = EMU index, column 1 = a random
    # integer in [0, 4000]; column 2 is left at zero here.
    emu_data = np.zeros((num, 3), dtype=float)
    for i in range(len(emu_data)):
        emu_data[i, 0] = i
        emu_data[i, 1] = random.randint(0, 4000)
    #emu_data = np.array(sorted(emu_data, key=emu_order))
    return emu_data


(running_lines, start_stations, start_prepared, end_prepared,
 cost_time_graph, repair_qualify) = prepare_data(cfg.case1_path)
"""parameter"""
city_num = len(running_lines)
(tsc, tsp_1) = cfg.total_param
"""main part"""
emu_data_initial = emu_info(city_num)
#emu_data_initial = np.loadtxt("emu_data.txt")

best_num_plot = []
best_tim_iternum = []
best_time_plot = []
best_emu_lines_total = []
best_num_total = city_num
best_repair_total = 50
best_time_total = 3000
Example #6
def benchmark_task(args, writer=None, feat='node-label'):
    """

    :param args:
    :param writer:
    :param feat:
    :return:
    """
    # load graphs from file: graphs = [nx.Graph]
    graphs = load_data.read_graphfile(args.datadir,
                                      args.bmname,
                                      max_nodes=args.max_nodes)

    if feat == 'node-feat' and 'feat_dim' in graphs[0].graph:
        print('Using node features')
        input_dim = graphs[0].graph['feat_dim']
    elif feat == 'node-label' and 'label' in graphs[0].node[0]:
        print('Using node labels')
        for G in graphs:
            for u in G.nodes():
                G.node[u]['feat'] = np.array(G.node[u]['label'])
    else:
        print('Using constant labels')
        featgen_const = featgen.ConstFeatureGen(
            np.ones(args.input_dim, dtype=float))
        for G in graphs:
            featgen_const.gen_node_features(G)

    train_dataset, val_dataset, test_dataset, max_num_nodes, input_dim, assign_input_dim = \
            load_data.prepare_data(graphs, args, max_nodes=args.max_nodes)
    if args.method == 'soft-assign':
        print('Method: soft-assign')
        model = encoders.SoftPoolingGcnEncoder(
            max_num_nodes,
            input_dim,
            args.hidden_dim,
            args.output_dim,
            args.num_classes,
            args.num_gc_layers,
            args.hidden_dim,
            assign_ratio=args.assign_ratio,
            num_pooling=args.num_pool,
            bn=args.bn,
            dropout=args.dropout,
            linkpred=args.linkpred,
            args=args,
            assign_input_dim=assign_input_dim).cuda()
    elif args.method == 'base-set2set':
        print('Method: base-set2set')
        model = encoders.GcnSet2SetEncoder(input_dim,
                                           args.hidden_dim,
                                           args.output_dim,
                                           args.num_classes,
                                           args.num_gc_layers,
                                           bn=args.bn,
                                           dropout=args.dropout,
                                           args=args).cuda()
    else:
        print('Method: base')
        model = encoders.GcnEncoderGraph(input_dim,
                                         args.hidden_dim,
                                         args.output_dim,
                                         args.num_classes,
                                         args.num_gc_layers,
                                         bn=args.bn,
                                         dropout=args.dropout,
                                         args=args).cuda()

    train(train_dataset,
          model,
          args,
          val_dataset=val_dataset,
          test_dataset=test_dataset,
          writer=writer)
    evaluate(test_dataset, model, args, 'Test')
Example #7
def syn_community2hier(args, writer=None):

    # data
    feat_gen = [featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float))]
    graphs1 = datagen.gen_2hier(1000, [2, 4], 10, range(4, 5), 0.1, 0.03,
                                feat_gen)
    graphs2 = datagen.gen_2hier(1000, [3, 3], 10, range(4, 5), 0.1, 0.03,
                                feat_gen)
    graphs3 = datagen.gen_2community_ba(range(28, 33), range(4, 7), 1000, 0.25,
                                        feat_gen)

    for G in graphs1:
        G.graph['label'] = 0
    for G in graphs2:
        G.graph['label'] = 1
    for G in graphs3:
        G.graph['label'] = 2

    graphs = graphs1 + graphs2 + graphs3

    train_dataset, val_dataset, test_dataset, max_num_nodes, input_dim, assign_input_dim = load_data.prepare_data(
        graphs, args)

    if args.method == 'soft-assign':
        print('Method: soft-assign')
        model = encoders.SoftPoolingGcnEncoder(
            max_num_nodes,
            input_dim,
            args.hidden_dim,
            args.output_dim,
            args.num_classes,
            args.num_gc_layers,
            args.hidden_dim,
            assign_ratio=args.assign_ratio,
            num_pooling=args.num_pool,
            bn=args.bn,
            linkpred=args.linkpred,
            args=args,
            assign_input_dim=assign_input_dim).cuda()
    elif args.method == 'base-set2set':
        print('Method: base-set2set')
        model = encoders.GcnSet2SetEncoder(
            input_dim,
            args.hidden_dim,
            args.output_dim,
            2,
            args.num_gc_layers,
            bn=args.bn,
            args=args,
            assign_input_dim=assign_input_dim).cuda()
    else:
        print('Method: base')
        model = encoders.GcnEncoderGraph(input_dim,
                                         args.hidden_dim,
                                         args.output_dim,
                                         2,
                                         args.num_gc_layers,
                                         bn=args.bn,
                                         args=args).cuda()
    train(train_dataset,
          model,
          args,
          val_dataset=val_dataset,
          test_dataset=test_dataset,
          writer=writer)
Example #8
def syn_community1v2(args, writer=None, export_graphs=False):

    # data
    graphs1 = datagen.gen_ba(
        range(40, 60), range(4, 5), 500,
        featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float)))
    for G in graphs1:
        G.graph['label'] = 0
    if export_graphs:
        util.draw_graph_list(graphs1[:16], 4, 4, 'figs/ba')

    graphs2 = datagen.gen_2community_ba(
        range(20, 30), range(4, 5), 500, 0.3,
        [featgen.ConstFeatureGen(np.ones(args.input_dim, dtype=float))])
    for G in graphs2:
        G.graph['label'] = 1
    if export_graphs:
        util.draw_graph_list(graphs2[:16], 4, 4, 'figs/ba2')

    graphs = graphs1 + graphs2

    train_dataset, val_dataset, test_dataset, max_num_nodes, input_dim, assign_input_dim = load_data.prepare_data(
        graphs, args)
    if args.method == 'soft-assign':
        print('Method: soft-assign')
        model = encoders.SoftPoolingGcnEncoder(
            max_num_nodes,
            input_dim,
            args.hidden_dim,
            args.output_dim,
            args.num_classes,
            args.num_gc_layers,
            args.hidden_dim,
            assign_ratio=args.assign_ratio,
            num_pooling=args.num_pool,
            bn=args.bn,
            linkpred=args.linkpred,
            assign_input_dim=assign_input_dim).cuda()
    elif args.method == 'base-set2set':
        print('Method: base-set2set')
        model = encoders.GcnSet2SetEncoder(input_dim,
                                           args.hidden_dim,
                                           args.output_dim,
                                           2,
                                           args.num_gc_layers,
                                           bn=args.bn).cuda()
    else:
        print('Method: base')
        model = encoders.GcnEncoderGraph(input_dim,
                                         args.hidden_dim,
                                         args.output_dim,
                                         2,
                                         args.num_gc_layers,
                                         bn=args.bn).cuda()

    train(train_dataset,
          model,
          args,
          val_dataset=val_dataset,
          test_dataset=test_dataset,
          writer=writer)
Example #9

        batch_size = 32
        log_lr = 4e-4
        weight_lr = 0.001
        weight_lr1 = 0.0005

        for data_name in data_list:

            vocab_dict_path = '/tmp/pycharm_project_410/glove.6B.300d.txt'

            save_path_base = data_name
            train_data, test_data, user_dict, item_dict, \
            u_max, i_max, user_num, item_num = \
                load_data.prepare_data(data_path='/tmp/pycharm_project_410/' + data_name + '.json',
                                       bert_dir='/tmp/pycharm_project_410/bert-unbase',
                                       save_encode_result_pickle_path=f'./{save_path_base}_encode_result.pkl', )
            print(f'{data_name} finished!')

            print(u_max)
            print(i_max)

            k_size = math.floor((i_max - 4) / 5)
            print(k_size)
            batch = Batch(train_data,
                          test_data,
                          user_dict,
                          item_dict,
                          u_max,
                          i_max,
                          batch_size=32,
Example #10
MIN_LENGTH = 3
MAX_LENGTH = 25

batch_size = 64
'''
read the whole data set and set up Lang objects (input_lang, output_lang) and sentence pairs
Lang objects contain
  - language name (lang.name)
  - word2index dict (word2index)
  - word counts dict (word2count)
  - index2word dict including padding, start and end of sequence tokens (index2word)
  - vocab size including padding, start and end of sequence tokens (n_words)
'''

input_lang, output_lang, pairs = load_data.prepare_data(
    'eng', 'deu', MIN_LENGTH, MAX_LENGTH, True)
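
# For illustration (a hedged sketch, not in the original; the padding index is
# an assumption), the Lang attributes described in the docstring above can be
# inspected like this:
print(input_lang.name)                     # e.g. 'eng'
print(input_lang.n_words)                  # vocab size incl. padding/SOS/EOS
print(input_lang.word2index.get('house'))  # index of a known word, else None
print(input_lang.index2word[0])            # token at index 0 (assumed padding)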

# Remove words with frequencies below MIN_COUNT
MIN_COUNT = 5
input_lang.trim(MIN_COUNT)
output_lang.trim(MIN_COUNT)

print('\nBefore removing infrequent words:')
for i in range(0, 11):
    print(pairs[i])

#Remove pairs containing infrequent words (defined by MIN_COUNT)
pairs = load_data.trim_pairs(input_lang, output_lang, pairs)
print('\nAfter removing infrequent words:')
for i in range(0, 4):
    print(pairs[i])