def generate_ring_graph(n, k):
    g = Graph()
    nodes = list(range(n))
    for j in range(1, k // 2 + 1):
        targets = nodes[j:] + nodes[0:j]  # first j nodes are now last in list
        g.addEdgesFromList(zip(nodes, targets))
    return g
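A minimal usage sketch for the ring-lattice generator above (not part of the original example); it only calls the function defined here.

# Hypothetical usage: a 10-node ring where each node is linked to its 4 nearest
# neighbours (2 on each side), which is what generate_ring_graph(n, k) builds.
ring = generate_ring_graph(10, 4)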
Example #2
class S3Handler(object):

    """SEED handler class for remote invoking"""

    def __init__(self, model_file):
        # try to load node model
        self._model = Graph()

        try:
            self._model.load(model_file)

        except Exception as e:

            SEED_LOG.error('Loading Exception: %s' % e)

            SEED_LOG.info('try "seed -i" to reinitialize model')

            raise e

        self._base_uri = \
            self._model.value(predicate=RDF.type, object=OWL.Ontology)

        SEED_LOG.info('Server model: %s' % self._base_uri)

        self._version = \
            self._model.value(subject=self._base_uri, predicate=OWL.versionInfo)

        SEED_LOG.info('Server version: %s' % self._version)

        self._cwd = \
            self._model.value(predicate=RDF.type, object=SEED.Root)

        SEED_LOG.info('Server root: %s' % self._cwd)
Example #3
def load(fileName: str) -> Graph:
    print('loader: load: loading graph ' + fileName + '...')
    with open(fileName, 'r') as inputFile:
        inputData = list(map(float, inputFile.read().split()))
        n, m = int(inputData[0]), int(inputData[1])

        if (n < 1):
            raise Exception('loader: load: invalid number of nodes', n)

        if (m < (n - 1)):
            raise Exception('loader: load: invalid number of edges', m)

        print(len(inputData))

        g = Graph(n, m)
        inputIndex, edgeIndex = 2, 0
        while (edgeIndex < m):
            u, inputIndex = int(inputData[inputIndex]), inputIndex + 1
            v, inputIndex = int(inputData[inputIndex]), inputIndex + 1
            c, inputIndex = inputData[inputIndex], inputIndex + 1

            if (u >= n):
                raise Exception('loader: load: invalid node id', u)

            if (v >= n):
                raise Exception('loader: load: invalid node id', v)

            g.addEdge(u, v, c)
            g.addEdge(v, u, c)
            edgeIndex += 1

    print('loader: load: loading graph done', g)
    return g
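For clarity, here is the input layout that load() expects, inferred from the parsing code above: the file starts with the node count n and edge count m, followed by m whitespace-separated "u v c" triples (endpoints and a cost). The file name and values below are made up for illustration.

# Hypothetical example: write a tiny 3-node, 3-edge graph file and load it.
sample = (
    "3 3\n"
    "0 1 1.5\n"
    "1 2 2.0\n"
    "0 2 4.0\n"
)
with open('tiny_graph.txt', 'w') as f:
    f.write(sample)
g = load('tiny_graph.txt')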
Example #4
def main(self):
    mode = 'eval'
    g = Graph(mode=mode)
    print('{} graph loaded.'.format(mode))
    saver = tf.train.Saver()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                          log_device_placement=False,
                                          allow_soft_placement=True)) as sess:
        writer = tf.summary.FileWriter(hp.log_dir, sess.graph)
        loaded = False
        try:
            try:
                print('try to load trained model in {} ...'.format(
                    hp.model_dir))
                saver.restore(sess, tf.train.latest_checkpoint(hp.model_dir))
                loaded = True
            finally:
                if loaded is False:
                    print(
                        'load trained model failed in eval, please check ...')
                    exit(0)
                while 1:
                    _, loss, summary, steps, y_hat = sess.run(
                        [g.train_op, g.loss, g.merged, g.global_step, g.y_hat])
                    writer.add_summary(summary, steps)
                    saver.save(
                        sess,
                        os.path.join(hp.model_dir,
                                     'eval_model_{}'.format(steps)))
                    print('eval mode \t steps  : {}, loss : {}'.format(
                        steps, loss))
        except:
            print('eval over.')
Example #5
def main():
    conn = db_utils.connect('localhost', 'root', 'boletin_oficial')

    g = Graph()

    try:
        fuente = list(
            g.fuentes_de_informacion.index.lookup(
                nombre="DineroYPolitica.org"))[0]
    except IndexError:
        fuente = g.fuentes_de_informacion.create(nombre="DineroYPolitica.org")

    for r in db_utils.query_db(conn.cursor(), QUERY):
        # XXX TODO
        # political parties
        # people (natural and legal persons); figure out what to do with CUITs/DNIs; providers table
        # donations
        # lists?

        LOG.info('%r - %r - %r %r' % (
            articulo,
            dependencia,
            puesto,
            persona,
        ))
Example #6
def generate_2d_lattice(n, m):
    g = Graph()

    for i, j in product(range(n + 1), range(m + 1)):
        g.addVertex(f'{i},{j}')

    for i, j in product(range(n + 1), range(m + 1)):
        current_node = f'{i},{j}'
        g.addEdgeIfExists(current_node, f'{i+1},{j}')
        g.addEdgeIfExists(current_node, f'{i-1},{j}')
        g.addEdgeIfExists(current_node, f'{i},{j+1}')
        g.addEdgeIfExists(current_node, f'{i},{j-1}')
    return g
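A short usage sketch for the lattice generator above (illustrative only); it relies on addEdgeIfExists silently skipping neighbours that fall outside the grid.

# Hypothetical usage: a (3+1) x (2+1) grid of vertices labelled 'i,j', each joined
# to its horizontal and vertical neighbours.
lattice = generate_2d_lattice(3, 2)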
Example #7
    def __init__(self, model_file):
        # try to load node model
        self._model = Graph()

        try:
            self._model.load(model_file)

        except Exception as e:

            SEED_LOG.error('Loading Exception: %s' % e)

            SEED_LOG.info('try "seed -i" to reinitialize model')

            raise e
Example #8
def pass_model(x, threshold):
    x = np.array(x)
    x = np.expand_dims(x, 0)  # [1, frames, fft/2 + 1]
    mode = 'infer'
    g = Graph(mode=mode)
    print(f'{mode} graph loaded.')
    saver = tf.train.Saver()
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint(hp.model_dir))
        y = sess.run([g.y_hat], feed_dict={g.x: x})
        y = np.array(y)
        y = np.squeeze(y)
    y[y < threshold] = 0
    y[y >= threshold] = 1
    return y
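A sketch of how pass_model might be called, assuming a trained checkpoint exists under hp.model_dir; the feature dimension and frame count below are made-up values, not taken from the original example.

# Hypothetical call: 100 frames of 257-dim spectral features, thresholded at 0.5.
dummy_frames = np.random.rand(100, 257).astype(np.float32)
labels = pass_model(dummy_frames, threshold=0.5)  # binary per-frame decisions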
Example #9
def main():
    conn = db_utils.connect('localhost', 'root', 'boletin_oficial')

    g = Graph()

    # ['articulo_id', 'articulo_texto', 'dependencia_id', 'dependencia_nombre', 'per_apellido', 'per_boletines', 'per_cuit', 'per_dni', 'per_domicilio_especial', 'per_estado_civil', 'per_id', 'per_nombre', 'per_nya', 'per_prefijo', 'per_sufijo', 'per_titulo', 'puesto_id', 'puesto_nombre']

    for r in db_utils.query_db(conn.cursor(), QUERY):

        articulo = g.articulos.create(
            id=r['articulo_id'], texto=r['articulo_texto'].decode('utf-8'))

        try:
            dependencia = list(
                g.dependencias.index.lookup(id=r['dependencia_id']))[0]
        except IndexError:
            dependencia = g.dependencias.create(
                id=r['dependencia_id'],
                nombre=r['dependencia_nombre'].decode('utf-8'))

        try:
            puesto = list(g.puestos.index.lookup(id=r['puesto_id']))[0]
        except IndexError:
            puesto = g.puestos.create(
                id=r['puesto_id'], nombre=r['puesto_nombre'].decode('utf-8'))

        try:
            persona = list(g.personas.index.lookup(id=r['per_id']))[0]
        except IndexError:
            persona = g.personas.create(
                dni=r['per_dni'],
                nombre_y_apellido=r['per_nya'].decode('utf-8'),
                id=r['per_id'])

        g.nombramientos.create(persona, dependencia,
                               {'puesto_id': r['puesto_id']})
        g.plantel.create(dependencia, puesto, {'persona_id': r['per_id']})

        LOG.info('%r - %r - %r %r' % (
            articulo,
            dependencia,
            puesto,
            persona,
        ))
Example #10
def main():
    mode = 'train'
    G = Graph(mode=mode)
    print('{} graph loaded.'.format(mode))
    saver = tf.train.Saver()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.5)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                          log_device_placement=False,
                                          allow_soft_placement=True)) as sess:
        writer = tf.summary.FileWriter(hp.LOG_DIR, sess.graph)
        try:
            print(f'Try to load trained model in {hp.MODEL_DIR} ...')
            saver.restore(sess, tf.train.latest_checkpoint(hp.MODEL_DIR))
        except:
            print(
                'Load trained model failed, start training with initializer ...'
            )
            sess.run(tf.global_variables_initializer())
        finally:
            sess.run(tf.local_variables_initializer())
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            try:
                while not coord.should_stop():
                    _, loss, summary, steps = sess.run(
                        [G.train_op, G.loss, G.merged, G.global_step])
                    print('train mode \t steps : {} \t loss : {}'.format(
                        steps, loss))
                    writer.add_summary(summary=summary, global_step=steps)
                    if steps % (hp.PER_STEPS + 1) == 0:
                        saver.save(
                            sess,
                            os.path.join(
                                hp.MODEL_DIR,
                                'model_{}los_{}steps'.format(loss, steps)))
            except tf.errors.OutOfRangeError:
                print('Training Done.')
            finally:
                coord.request_stop()
            coord.join(threads)
Example #11
def main():
    print("line 69")
    fpath = './paralleldata/VCC2SF1/SF1|10001.wav'
    fpath = fpath.strip()
    print(fpath)  # ./data/convert/TMM1/M10007.wav

    new_wav_for_infer, sr = librosa.load(fpath, sr=hp.SR, mono=True, dtype=np.float32)  # returns the audio signal and the sample rate

    ori_mel = melspectrogram(new_wav_for_infer, hparams)  # source speaker's spectral features; np.random.normal is later used to build an array of the same shape
    ori_mel = ori_mel.T
    print('source speaker mel.shape:')
    print(ori_mel.shape)  # sp is 2-D

    mode = 'infer'
    G = Graph(mode=mode)
    print('{} graph loaded.'.format(mode))
    saver = tf.train.Saver()
    with tf.Session(config=tf.ConfigProto(log_device_placement=False, allow_soft_placement=True)) as sess:
        try:
            print(f'Try to load trained model in {hp.MODEL_DIR} ...')
            saver.restore(sess, tf.train.latest_checkpoint(hp.MODEL_DIR))
        except:
            raise Exception(f'Load trained model failed in {hp.MODEL_DIR}, please check ...')
        finally:
            # ori_mel = np.reshape(ori_mel, (-1, hp.CODED_DIM))  # source speaker's spectral features
            ori_mel_batch = np.expand_dims(ori_mel, axis=0)   # the model is trained on 3-D input; becomes [1, None, 80]

            # aim_spkid_batch = np.array([[aim_spkid]])
            for j in tqdm.tqdm(range(1)):
                aim_out = sess.run(G.aim_out, {G.ori_mel: ori_mel_batch})

            aim_out = np.array(aim_out, dtype=np.float64)  # the converted mel, 3-D (first dim is batch=1)
            predict_mel = np.reshape(aim_out, (-1, hp.CODED_DIM))  # [None, 80], reshaped to a 2-D mel
            print("line 103 predict_mel.shape = "+str(predict_mel.shape))
            predict_mel = predict_mel.T
            # ori_new_f0 = np.random.normal(source_mean_f0, 1.0, predict_mel.shape[0])

            print('Sp predicted done.')
            synthesis(fpath, aim_mel=predict_mel, )
Example #12
def main(self):
    mode = 'train'
    g = Graph(mode=mode)
    print('{} graph loaded.'.format(mode))
    saver = tf.train.Saver()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.9)
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                          log_device_placement=False,
                                          allow_soft_placement=True)) as sess:
        writer = tf.summary.FileWriter(hp.log_dir, sess.graph)
        loaded = False
        try:
            try:
                print('try to load trained model in {} ...'.format(
                    hp.model_dir))
                saver.restore(sess, tf.train.latest_checkpoint(hp.model_dir))
                loaded = True
            finally:
                if loaded is False:
                    print(
                        'load trained model failed, start training with initializer ...'
                    )
                sess.run(tf.global_variables_initializer())
                while 1:
                    _, loss, summary, steps = sess.run(
                        [g.train_op, g.loss, g.merged, g.global_step])
                    print('train mode \t steps  : {}, loss : {}'.format(
                        steps, loss))
                    writer.add_summary(summary, steps)
                    if steps % (hp.per_steps + 1) == 0:
                        saver.save(
                            sess,
                            os.path.join(hp.model_dir,
                                         'model_{}'.format(steps)))
        except:
            print('train over.')
Example #13
def main():
    mode = 'train'
    G = Graph(mode=mode)
    print('{} graph loaded.'.format(mode))
    saver = tf.train.Saver()
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)  # note: the fraction here is 1
    with tf.Session(config=tf.ConfigProto(gpu_options=gpu_options,
                                          log_device_placement=False,
                                          allow_soft_placement=True)) as sess:
        writer = tf.summary.FileWriter(hp.LOG_DIR, sess.graph)
        try:
            print(f'Try to load trained model in {hp.MODEL_DIR} ...')
            # MODEL_DIR = './models'
            saver.restore(sess, tf.train.latest_checkpoint(hp.MODEL_DIR))
            # reload the already-trained model into the session so that sess.run(...) can be used directly below
        except:
            print(
                'Load trained model failed, start training with initializer ...'
            )
            sess.run(tf.global_variables_initializer())
        finally:
            sess.run(tf.local_variables_initializer())
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            try:
                while not coord.should_stop():
                    # the loop condition above is boilerplate that goes with tf.train.batch()
                    steps = 1  # one step at a time???
                    G_loss = 0  # initialize
                    # D_loss = 0
                    # if steps % 5 != 0:
                    _, kl_loss, G_loss, summary, steps = sess.run([
                        G.G_train_op, G.kl_loss, G.G_loss, G.merged,
                        G.global_step
                    ])

                    print('train mode \t steps : {} \t '
                          'kl_loss : {} \t '
                          'G_total_loss : {}'.format(steps, kl_loss, G_loss))

                    writer.add_summary(summary=summary, global_step=steps)
                    saver.save(
                        sess,
                        os.path.join(
                            hp.MODEL_DIR,
                            'model_%.3fGlos_%dsteps' % (G_loss, steps)))
                    # else:
                    #     _, kl_loss, G_loss, summary, steps = sess.run(
                    #         [G.G_train_op, G.kl_loss, G.G_loss, G.merged, G.global_step])
                    #     print('train mode \t steps : {} \t '
                    #           'kl_loss : {} \t '
                    #           'G_total_loss : {}'.format(steps,
                    #                                      kl_loss,
                    #                                      G_loss))
                    #     writer.add_summary(summary=summary, global_step=steps)
                    #
                    #     saver.save(sess, os.path.join(hp.MODEL_DIR, 'model_%.3fGlos_%dsteps' % (G_loss, steps)))
                    # else:
                    #     summary, steps = sess.run([G.merged, G.global_step])
                    #
                    #     print('train mode \t steps : {} '.format(steps,))

                    # writer.add_summary(summary=summary, global_step=steps)
                    # if steps % (hp.PER_STEPS + 1) == 0:
                    #     # hp.PER_STEPS = 1000, but there are only about 760 steps in total, so it was changed to 100
                    #     saver.save(sess, os.path.join(hp.MODEL_DIR, 'model_%.3fGlos_%dsteps' % (G_loss, steps)))

            except tf.errors.OutOfRangeError:
                print('Training Done.')
            finally:
                coord.request_stop()
            coord.join(threads)
Example #14
def main():
    # spkid selects the target speaker to convert to; fpath is the source audio
    fpath = './data/wavs/F10001.wav'
    aim_spkid = 1
    # find one audio file of this target speaker here? os.listdir
    fpath_aim = {
        'TMM1': 1,
        'TEF2': 2,
        'TGM1': 3,
        'TGF1': 4,
        'SEF1': 5,
        'TEF1': 6,
        'TEM1': 7,
        'TFM1': 8,
        'TMF1': 9,
        'SEM2': 10,
        'TFF1': 11,
        'SEM1': 12,
        'TEM2': 13,
        'SEF2': 14
    }
    aim_spkname = './data/convert/'
    for spk, num in fpath_aim.items():
        if num == aim_spkid:
            print(spk)
            aim_spkname = aim_spkname + spk  # string concatenation to build the path

    files = os.listdir(aim_spkname)  # the folder holding the target speaker's files
    aimpath = os.path.join(aim_spkname, files[0])
    print(aimpath)  # ./data/convert/TMM1/M10007.wav

    source_mean_f0 = get_mean_f0(fpath)
    target_mean_f0 = get_mean_f0(aimpath)
    diff_mean_f0 = target_mean_f0 - source_mean_f0
    source_mean_f0 += diff_mean_f0
    print(source_mean_f0)  # source_mean_f0 here is still just a scalar

    ori_feat = get_sp(fpath)  # source speaker's sp; np.random.normal is later used to build an array of the same shape
    print('source speaker sp:')
    print(ori_feat)  # sp is 2-D
    # synthesis has to use the target speaker's sp, otherwise the frame counts do not match
    # mine: no, the predicted target sp should be used, [60*628]; just take .shape[0] = 628
    # aim_feat = get_sp(aimpath)  # target speaker's sp, used only for its shape
    # aim_feat_to_onedim = aim_feat.flatten()

    # print(aim_feat.shape)    # sp.shape = [60 * 531] = dims * frames
    # sp shape: frames * dims; f0 shape: frames

    # moved down to the prediction step below:
    # ori_new_f0 = np.random.normal(source_mean_f0,1.0,aim_feat.shape[0])  # a new f0 shaped like the target speaker's
    # wrong dimensions: f0 is 1-D, sp is 2-D
    # print(ori_new_f0)  # this is the new f0
    # print(ori_feat)
    mode = 'infer'
    G = Graph(mode=mode)
    print('{} graph loaded.'.format(mode))
    saver = tf.train.Saver()
    with tf.Session(config=tf.ConfigProto(log_device_placement=False,
                                          allow_soft_placement=True)) as sess:
        try:
            print(f'Try to load trained model in {hp.MODEL_DIR} ...')
            saver.restore(sess, tf.train.latest_checkpoint(hp.MODEL_DIR))
        except:
            raise Exception(
                f'Load trained model failed in {hp.MODEL_DIR}, please check ...'
            )
        finally:
            ori_feat = np.reshape(ori_feat, (-1, hp.CODED_DIM))  # source speaker's sp
            ori_feat_batch = np.expand_dims(ori_feat, axis=0)  # the model is trained on 3-D input
            aim_spkid_batch = np.array([[aim_spkid]])
            for j in tqdm.tqdm(range(1)):
                aim_out = sess.run(G.aim_out, {
                    G.ori_feat: ori_feat_batch,
                    G.aim_spk: aim_spkid_batch
                })

            print('\n ori_feat_batch.shape = ')
            print(ori_feat_batch.shape)
            print('aim_spkid_batch,shape = ')
            print(aim_spkid_batch.shape)

            aim_out = np.array(aim_out, dtype=np.float64)
            predict_sp = np.reshape(aim_out, (-1, hp.CODED_DIM))
            # print(predict_sp.shape)  # [628, 60]: sp shape is frames * dims; f0 shape is frames
            # print('predict_sp.shape[0]=')
            # print(predict_sp.shape[0])  # 628
            ori_new_f0 = np.random.normal(source_mean_f0, 1.0,
                                          predict_sp.shape[0])

            print('source speaker 60-dim sp at frame 400 = ')
            print(ori_feat_batch[0][399][:])  # (B, T, CODED_DIMS)
            print(aim_spkid_batch[0][0])

            print('predict_sp.shape = ')
            print(predict_sp.shape)
            print('\ntarget speaker 60-dim sp at frame 400 = ')
            print(predict_sp[399][:])  # (B, T, CODED_DIMS)

            for i in range(len(ori_feat_batch[0][399][:])):
                print(
                    str(i) + ',' + str(ori_feat_batch[0][399][i]) + ',' +
                    str(predict_sp[399][i]))

            print('Sp predicted done.')
            synthesis(fpath, predict_sp, aim_spkid)
Example #15
        time.strftime('%Y-%m-%d %H-%M-%S', time.localtime(time.time())))
else:
    print("\nTest mode...")
    # timestamp = input("Enter the timestamp of the model to restore, e.g. 2018-12-12 19-58-25:")
    timestamp = "2018-12-19 17-39-12"
    # timestamp = "2018-12-19 17-03-59"
    print("\nCurrently loaded model: ", timestamp, '\n')
    args.is_training = False

args.model_path = os.path.join('.', "model", timestamp + "/")
# args.model_path = os.path.join('.', "model", 'test' + "/")
if not os.path.exists(args.model_path):
    os.makedirs(args.model_path)

if args.is_training:

    model = Graph(args, cn_words_embedding, en_words_embedding,
                  cn_word2id_dict, en_word2id_dict, en_id2word_dict)
    model.build_graph()
    model.train(cn_train_data, en_train_data)

else:

    args.model_path = tf.train.latest_checkpoint(args.model_path)
    model = Graph(args, cn_words_embedding, en_words_embedding,
                  cn_word2id_dict, en_word2id_dict, en_id2word_dict)
    model.build_graph()
    print("=============================")
    model.test(cn_dev_data, en_dev_data)
Example #16
def makeUnit(expname, exptype, directory, printSubsequence, detail, allcrash):
    apelog_fname = os.path.join(directory, 'ape_stdout_stderr.txt')
    logcat_fname = os.path.join(directory, 'logcat.txt')
    mtdata_directories = glob.glob(os.path.join(directory, 'mt_data', '*'))

    assert os.path.isfile(apelog_fname), apelog_fname
    assert os.path.isfile(logcat_fname), logcat_fname

    modelobjects = glob.glob(os.path.join(directory, 'ape', '*', 'sataModel.obj'))
    if len(modelobjects) < 1:
        print("There is no model object in {}".format(directory))
        with open(apelog_fname, 'rt') as f:
            num_lines = int(run_cmd('wc -l {}'.format(apelog_fname)).split()[0])

            for i, line in enumerate(f):
                if i >= num_lines - 10:
                    print(' {}: {}'.format(i, line.rstrip()))

        return None

    assert len(modelobjects) == 1, modelobjects
    modelobject_fname = modelobjects[0]

    warningCounter = Counter()
    waitCounter = Counter()
    crashLongMessagesCounter = Counter()
    time_elapsed = -1
    strategy_cnt = {}
    first_timestamp = -1
    strategy_changed_timestamp = -1
    first_met_timestamp = -1
    timestamp = -1
    with open(apelog_fname, 'rt') as f:
        it = iter(f)
        for line in it:
            line = line.rstrip()
            if line.startswith('[APE_MT_WARNING]'):
                warningCounter.append(line)

            gp = re.match(r'\[APE\] ([a-zA-Z]+) Strategy: buffer size \(([0-9]+)\)', line)
            if gp:
                nums = [re.match(r'\[APE\] *([0-9]+)  (.*)', next(it).rstrip()) for strategy in strategies[gp.group(1)]]
                strategy_cnt = dict(map(lambda gp:(gp.group(2), int(gp.group(1))), nums))
                continue

            elif line.startswith('## Network stats: elapsed time='):
                gp = re.match(r'## Network stats: elapsed time=([0-9]+)ms \(([0-9]+)ms mobile, ([0-9]+)ms wifi, ([0-9]+)ms not connected\)', line)
                assert gp, line
                total, mob, wifi, total2 = gp.groups()
                assert total == total2 and mob == wifi, line
                time_elapsed = int(total)

            elif line == "[APE] *** INFO *** We are still waiting for activity loading. Let's wait for another 100ms...":
                waitCounter.append(line)

            elif line.startswith("[APE] // Long Msg: "):
                crashLongMessagesCounter.append(line[len("[APE] // Long Msg: "):])

                if allcrash:
                    print(line)
                    while line.startswith("[APE] //"):
                        line = next(it).rstrip()
                        print(line)
                    assert line.startswith('[APE] *** INFO *** Appending crash [')
                    current_timestamp = int(line.split('@')[1])
                    print('*** Time elapsed {}'.format(current_timestamp - first_timestamp))
                    print()

            elif line == "[APE] *** INFO *** Half time/counter consumed":
                strategy_changed_timestamp = timestamp

            elif line.startswith("[MonkeyServer] idle fetch"):
                gp = re.match(r'\[MonkeyServer\] idle fetch ([-]{0,1}[0-9]+)', line)
                assert gp, line
                timestamp = int(gp.group(1))
                if first_timestamp == -1 and timestamp not in [0, -1]:
                    first_timestamp = timestamp
            elif line == '[APE_MT] Lastlast transition met target':
                if first_met_timestamp == -1:
                    first_met_timestamp = timestamp
            elif 'MET TARGET' in line: # for old version
                if first_met_timestamp == -1:
                    first_met_timestamp = timestamp


    if time_elapsed == -1:
        print("Time elapsed not found")
        with open(apelog_fname, 'rt') as f:
            num_lines = int(run_cmd('wc -l {}'.format(apelog_fname)).split()[0])

            for i, line in enumerate(f):
                if i >= num_lines - 10:
                    print(' {}: {}'.format(i, line.rstrip()))
        return None
    targetMethods = []
    lazy_counter = Counter()
    registered_counter = Counter()
    registered_lazily_counter = Counter()
    with open(logcat_fname, 'rt') as f:
        for line in f:
            if 'MiniTrace' not in line:
                continue

            line = line.rstrip()
            line = line[line.index('MiniTrace'):]

            gp = re.match(r'MiniTrace: TargetMethod (.*):(.*)\[(.*)\] ([a-z ]+)', line)
            if gp:
                clsname, mtdname, signature, status = gp.groups()
                if status == 'lazy':
                    clsname = 'L{};'.format(clsname)
                    if (clsname, mtdname, signature) not in targetMethods:
                        targetMethods.append((clsname, mtdname, signature))
                    lazy_counter.append((clsname, mtdname, signature))
                elif status == 'registered':
                    clsname = 'L{};'.format(clsname)
                    if (clsname, mtdname, signature) not in targetMethods:
                        targetMethods.append((clsname, mtdname, signature))
                    registered_counter.append((clsname, mtdname, signature))
                else:
                    assert status == 'registered lazily', status
                    assert clsname[0] == 'L' and clsname[-1] == ';', clsname
                    registered_lazily_counter.append((clsname, mtdname, signature))
    method_register_status = {}
    for method in targetMethods:
        if registered_counter[method] == 0:
            data =  '{}/{}'.format(registered_lazily_counter[method], lazy_counter[method])
        else:
            assert registered_lazily_counter[method] == 0, registered_lazily_counter[method]
            assert lazy_counter[method] == 0, lazy_counter[method]
            data = '{}/{}'.format(registered_counter[method], registered_counter[method])
        method_register_status[method] = data

    # self.exptype = exptype
    # self.expname = expname
    # self.directory = directory

    # marked call
    class MtdCounter(object):
        def __init__(self):
            self.cnt = 0
            self.main_tid = -1
            self.main_cnt = 0
        def setTid(self, main_tid):
            self.main_tid = main_tid
        def inc(self, value):
            self.main_cnt += 1
        def tidInc(self, tid, value):
            assert self.main_tid != -1
            if tid == self.main_tid:
                self.main_cnt += 1
            self.cnt += 1

    class ExecutionData:
        def __init__(self, string):
            assert all(c in ['0', '1'] for c in string), string
            self.string = string

        def union(self, new_string):
            assert len(self.string) == len(new_string)
            new_string = ''.join(['1' if a == '1' or b == '1' else '0' \
                for a, b in zip(self.string, new_string)])
            self.string = new_string

        def __or__(self, other):
            assert len(self.string) == len(other.string)
            new_string = ''.join(['1' if a == '1' or b == '1' else '0' \
                for a, b in zip(self.string, other.string)])
            return ExecutionData(new_string)

        def __repr__(self):
            return '<ExecutionData string={}>'.format(self.string)

        def ratio(self):
            return self.string.count('1') / len(self.string)

    sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../crashgen'))
    from consumer import parse_data, Threads, Methods

    execution_data = {}
    mtdCounter = MtdCounter()
    for mtdata_directory in mtdata_directories:
        binary_fname = os.path.join(mtdata_directory, 'data_0.bin')
        thread_fname = os.path.join(mtdata_directory, 'info_t.log')
        method_fname = os.path.join(mtdata_directory, 'info_m.log')

        if any(not os.path.isfile(fname) for fname in [binary_fname, thread_fname, method_fname]):
            continue
        mtdCounter.setTid(Threads(thread_fname).get_main_tid())
        parse_data(binary_fname, {10: mtdCounter.inc, 11: mtdCounter.inc, 12: mtdCounter.inc,
            13: mtdCounter.tidInc, 14: mtdCounter.tidInc, 15: mtdCounter.tidInc}, verbose=False)

        methods = Methods(method_fname)
        execf = os.path.join(mtdata_directory, 'exec.txt')
        with open(execf, 'rt') as f:
            for line in f:
                line = line.rstrip()
                if line.startswith('\0'):
                    line = line[1:]
                if line == '':
                    break
                if not line.startswith('Timestamp'):
                    mtdptr, execdata = line.split()
                    mtdptr = int(mtdptr, 16)
                    clsname, mtdname, signature, defclass = methods[mtdptr]
                    try:
                        execution_data[(clsname, mtdname, signature)].union(execdata)
                    except KeyError:
                        execution_data[(clsname, mtdname, signature)] = ExecutionData(execdata)


    string = '{},{}'.format(expname, exptype)
    string += ',{},{},{},'.format(time_elapsed, warningCounter.total(), waitCounter.total())

    assert all(method in targetMethods for method in execution_data)
    method_data = []
    for method in targetMethods:
        if method not in execution_data:
            data = 0.0
        else:
            data = execution_data[method].ratio()
        method_data.append('%s:%.3f' % (method_register_status[method], data))
    string += '/'.join(method_data)
    string += ',{},{}'.format(strategy_cnt.get('TARGET', 0) + strategy_cnt.get('MCMC', 0), sum(strategy_cnt.values()))
    string += ',{},{}'.format(mtdCounter.main_cnt, mtdCounter.cnt)

    # strategy changed timestamp
    if strategy_changed_timestamp == -1:
        string += ',NaN'
    else:
        string += ',{}'.format(strategy_changed_timestamp - first_timestamp)
    # first met transition timestmap
    if first_met_timestamp == -1:
        string += ',NaN'
    else:
        string += ',{}'.format(first_met_timestamp - first_timestamp)

    warningCounter.pretty()
    if not detail:
        string += ',{}'.format(crashLongMessagesCounter.total())
        string += ',{}'.format('/'.join(map(lambda tup:'{} {} [{}]'.format(*tup), targetMethods)))
        return string

    # analysis for sataModel.obj
    # crashes with targets
    # GUITreeTransition marked/total
    # State marked/total
    # StateTransition marked/total
    # unique subsequences (>=3 times / at least once)
    # @TODO State score
    # @TODO StateTransition score
    data = []
    import javaobj
    from common import classReadJavaList, readJavaList
    from tree import GUITree
    from model import Model, Graph, StateTransition
    try:
        with open(modelobject_fname, 'rb') as f:
            model = Model(javaobj.loads(f.read()))
    except Exception:
        return string + ',javaobjError'

    graph = Graph.init(model.graph)

    # crashes
    crashWithTargetMethodsCounter = Counter("crashes with target")
    crashWithoutTargetMethodsCounter = Counter("crashes without target")
    firstMoment = -1
    firstMomentTargetMethods = -1
    records = model.actionHistory
    for record in readJavaList(records):
        if firstMoment == -1:
            firstMoment = record.clockTimestamp
        if not record.guiAction:
            action = record.modelAction
            constant = action.type.constant
            if constant == 'PHANTOM_CRASH':
                # check stackTrace
                stackTrace = action.crash.stackTrace
                metTarget = False
                for line in stackTrace.split('\n'):
                    if metTarget:
                        break
                    for (clsname, mtdname, signature) in targetMethods:
                        if mtdname in line and clsname[1:-1].split('/')[-1] in line:
                            metTarget = True
                            break
                if metTarget:
                    crashWithTargetMethodsCounter.append(action.crash.stackTrace)
                    if firstMomentTargetMethods == -1:
                        firstMomentTargetMethods = record.clockTimestamp
                else:
                    crashWithoutTargetMethodsCounter.append(action.crash.longMsg)

    crashWithTargetMethodsCounter.pretty()
    crashWithoutTargetMethodsCounter.pretty()
    if firstMomentTargetMethods == -1:
        data.append('NaN')
    else:
        data.append(firstMomentTargetMethods - firstMoment)
    data.append(crashWithTargetMethodsCounter.total())
    data.append(crashLongMessagesCounter.total())

    treeHistory = graph.treeTransitionHistory
    marked_gtransitions = []
    marked_transitions = set()
    for gtransition in treeHistory:
        if gtransition.hasMetTargetMethod:
            marked_gtransitions.append(gtransition)
            marked_transitions.add(gtransition.stateTransition)

    data.append(len(marked_gtransitions))
    data.append(len(treeHistory))

    targetGUITrees = classReadJavaList(graph.metTargetMethodGUITrees, GUITree)
    targetStates = set(map(lambda t:t.getCurrentState(), targetGUITrees))
    targetStateIds = set(map(lambda t:id(t.currentState), targetGUITrees))
    assert len(targetStates) == len(targetStateIds), (len(targetStates), len(targetStateIds))

    data.append(len(targetStates))
    data.append(graph.size())

    # Split with marked State (old) -> Split with marked StateTransition
    from parseobj import getSubsequences, TargetSubsequence
    subsequences = getSubsequences(model, graph)
    subseqCounter = Counter()
    targetSubsequences = []
    for seq in subsequences:
        targetSubsequence = TargetSubsequence(seq[0], seq.getTimestampDelta(0))
        for i in range(1, len(seq)):
            tr = seq[i]
            timestamp = seq.getTimestampDelta(i)
            if tr.hasMetTargetMethod == True:
            # if id(tr.source.currentState) in targetStateIds:
                subseqCounter.append(targetSubsequence)
                targetSubsequences.append(targetSubsequence)
                targetSubsequence = TargetSubsequence(tr, timestamp)
            else:
                targetSubsequence.append(tr, timestamp)
        # subseqCounter.append(targetSubsequence)
        targetSubsequence.setActionRecords(seq.actionRecordsAtEnd)
        targetSubsequences.append(targetSubsequence)

    data.append(len([s for s in subseqCounter.values() if s >= 3]))
    data.append(len(subseqCounter))
    if printSubsequence:
        currentRunningSubsequences = []
        for subsequence in targetSubsequences:
            currentRunningSubsequences.append(subsequence)
            # if subsequence.containsTarget():
            crash = subsequence.getCrash()
            if crash:
                print('Subsequence newly emerged # {}'.format(subsequence.timestamps[0]))
                print(crash['stackTrace'])
                for subsequence in currentRunningSubsequences:
                    print(subsequence.seqdetail())
                print()
                currentRunningSubsequences = []
            # print('{}: {}'.format(subsequence.timestamps[0], subsequence.seqdetail()))

    string += ',' + ','.join(map(str, data))
    if len(subseqCounter) == 0:
        string += ',NaN,NaN,NaN,NaN,NaN,NaN'
    else:
        # #subseq with len <=1, <=2, <=3, <=4, <=5
        keys = subseqCounter.keys()
        cnts = []
        for sz in [1, 2, 3, 4, 5]:
            cnts.append(len([s for s in keys if len(s) <= sz]))
        cnts = tuple(cnts)
        string += ',%.2f,%d,%d,%d,%d,%d' % ((subseqCounter.total() / len(subseqCounter),) + cnts)

    # statistics for state / transition
    state_scores = []
    for state in targetStates:
        state_scores.append(graph.metTargetScore(state))
    if state_scores != []:
        state_scores = np.array(state_scores)
        string += ',%d,%.3f,%.3f,%.3f,%.3f' % (
            len(state_scores),
            np.min(state_scores),
            np.max(state_scores),
            np.average(state_scores),
            np.std(state_scores))
    else:
        string += ',0,NaN,NaN,NaN,NaN'

    transition_scores = []
    for transition in marked_transitions:
        transition_scores.append(StateTransition.init(transition).metTargetRatio())
    if transition_scores != []:
        transition_scores = np.array(transition_scores)
        string += ',%d,%.3f,%.3f,%.3f,%.3f' % (
            len(transition_scores),
            np.min(transition_scores),
            np.max(transition_scores),
            np.average(transition_scores),
            np.std(transition_scores))
    else:
        string += ',0,NaN,NaN,NaN,NaN'
    string += ',{}'.format('/'.join(map(lambda tup:'{} {} [{}]'.format(*tup), targetMethods)))

    return string
Example #17
import os

import numpy as np
import pandas as pd

from model import Graph

DATA_DIR = 'data'
DATA_CSV = 'karate.csv'
OUTPUT_DIR = 'outputs'
OUTPUT_CSV = 'karate_club_nn.csv'

if __name__ == '__main__':
    df = pd.read_csv(os.path.join(DATA_DIR, DATA_CSV))
    edge_list = df.values

    g = Graph()
    g.addEdgesFromList(edge_list)
    results = g.getShortestPath(7)
    results = pd.DataFrame(results.items(), columns=['id', 'distance'])
    results.to_csv(os.path.join(OUTPUT_DIR, OUTPUT_CSV), index=False)
Example #18
def generate_random_graph(n, p):
    g = Graph()
    for u, v in combinations(range(n), 2):
        if random.random() < p:
            g.addEdge(u, v)
    return g
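A minimal usage sketch for the G(n, p) generator above; only the function defined here is assumed.

# Hypothetical usage: 20 nodes, each of the C(20, 2) possible edges kept
# independently with probability 0.1 (an Erdos-Renyi style random graph).
random_g = generate_random_graph(20, 0.1)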
Example #19
    if args.st:
        # print state / statetransitions with met target
        for directory in args.directories:
            print('Experiment {}'.format(directory))
            import javaobj
            from common import classReadJavaList, readJavaList
            from tree import GUITree
            from model import Model, Graph, StateTransition
            with open(glob.glob(os.path.join(directory, "ape", "*", "sataModel.obj"))[0], 'rb') as f:
                try:
                    model = Model(javaobj.loads(f.read()))
                except Exception as e:
                    print(e)
                    continue

            graph = Graph.init(model.graph)
            targetTransitions = set()
            for gt in graph.treeTransitionHistory:
                if gt.hasMetTargetMethod:
                    st = StateTransition.init(gt.stateTransition)
                    targetTransitions.add(st)

            for st in sorted(list(targetTransitions), key=lambda t:repr(t)):
                print(st)


    elif args.detail:
        results = []
        for exp in args.directories:
            while exp.endswith('/'):
                exp = exp[:-1]
Example #20
from model import Graph
from train import read_data, extract_entity

load_model_path = 'output/subject_model.weights'
train_data, dev_data, test_data, id2class, class2id = read_data()
_, test_model = Graph(0, 0, 0, 0)
test_model.load_weights(load_model_path)


def predict(content, cls):
    return extract_entity(content, cls, class2id, test_model)


if __name__ == '__main__':
    while 1:
        content = input('content: ')
        cls = input('cls: ')
        res = predict(content, cls)
        print(res)
Example #21
from datetime import datetime
from flask import Flask
from utils import date_diff
import re

# configuration
DATABASE = 'graphdatabase'
DEBUG = True
SECRET_KEY = 'blubber'
USERNAME = '******'
PASSWORD = '******'
ADMIN = 'admin'

app = Flask(__name__)
app.config.from_object(__name__)

db = Graph()


@app.template_filter('getParlaments')
def getParlaments(eid):
    v = db.vertices.get(eid)
    if v.element_type == 'instance':
        ps = [data(p) for p in v.outV('instanceHasParlament')]
    elif v.element_type == 'proposal':
        ps = [data(p) for p in v.outV('proposalHasParlament')]
    return ps


@app.template_filter('getPeople')
def getPeople(eid):
    ''' returns all people of the Instance eid '''
Example #22
    learning_rate = 1e-3
    min_learning_rate = 1e-5
    epochs = 100
    is_test = False

    train_data, dev_data, test_data, id2class, class2id = read_data()

    total_steps, warmup_steps = calc_train_steps(
        num_example=len(train_data),
        batch_size=batch_size,
        epochs=epochs,
        warmup_proportion=0.1,
    )

    model, test_model = Graph(total_steps,
                              warmup_steps,
                              lr=learning_rate,
                              min_lr=min_learning_rate)

    if is_test:
        test_model.load_weights('output/subject_model.weights')
        model.load_weights('output/subject_model.weights')
        test(test_data, class2id, test_model)
        # acc = dev(dev_data, class2id, test_model)
        # print('acc: ', acc)
    else:
        # test_model.load_weights('output/subject_model.weights')
        # model.load_weights('output/subject_model.weights')

        evaluator = Evaluate(dev_data, model, test_model, class2id)
        X = data_generator(train_data, batch_size)
        steps = int((len(train_data) + batch_size - 1) / batch_size)
Example #23
        for d in tqdm(iter(test_data)):
            s = str(d[0]) + ',' + extract_entity(d[1].replace('\t', ''), d[2],
                                                 class2id, test_model)
            file.write(s + '\n')


if __name__ == '__main__':
    batch_size = 16
    learning_rate = 1e-3
    min_learning_rate = 1e-6
    epochs = 10
    is_test = False

    train_data, dev_data, test_data, id2class, class2id = read_data()

    total_steps, warmup_steps = calc_train_steps(
        num_example=len(train_data),
        batch_size=batch_size,
        epochs=epochs,
        warmup_proportion=0.1,
    )

    model, test_model = Graph(total_steps,
                              warmup_steps,
                              lr=learning_rate,
                              min_lr=min_learning_rate)

    test_model.load_weights('output/subject_model.weights')
    model.load_weights('output/subject_model.weights')
    test(test_data, class2id, test_model)
Example #24
from presenter import Presenter
from view import GraphView
from model import Graph

Presenter(GraphView(), Graph()).start()
Example #25
import os

import numpy as np
import pandas as pd

from model import Graph

DATA_DIR = 'data'
DATA_CSV = 'network.csv'
OUTPUT_DIR = 'outputs'
OUTPUT_DOT = 'graph.dot'

if __name__ == '__main__':
    df = pd.read_csv(os.path.join(DATA_DIR, DATA_CSV))
    edge_list = df.values

    g = Graph()
    g.addEdgesFromList(edge_list)
    g.saveGraph(os.path.join(OUTPUT_DIR, OUTPUT_DOT))
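As a possible follow-up (not in the original script), the DOT file written above can be rendered with Graphviz; this assumes the dot command-line tool is installed.

# Hypothetical rendering step: convert outputs/graph.dot into a PNG via Graphviz.
import subprocess
subprocess.run(['dot', '-Tpng', 'outputs/graph.dot', '-o', 'outputs/graph.png'], check=True)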
Example #26
def main():
    # spkid selects the target speaker to convert to; fpath is the source audio
    fpath = './data/wavs/F10001.wav'
    aim_spkid = 1
    # find one audio file of this target speaker here? os.listdir
    fpath_aim = {
        'TMM1': 1,
        'TEF2': 2,
        'TGM1': 3,
        'TGF1': 4,
        'SEF1': 5,
        'TEF1': 6,
        'TEM1': 7,
        'TFM1': 8,
        'TMF1': 9,
        'SEM2': 10,
        'TFF1': 11,
        'SEM1': 12,
        'TEM2': 13,
        'SEF2': 14
    }
    aim_spkname = './data/convert/'
    for spk, num in fpath_aim.items():
        if num == aim_spkid:
            print(spk)
            aim_spkname = aim_spkname + spk  # string concatenation to build the path

    files = os.listdir(aim_spkname)  # the folder holding the target speaker's files
    aimpath = os.path.join(aim_spkname, files[0])
    print(aimpath)  # ./data/convert/TMM1/M10007.wav

    ori_feat = get_sp(fpath)  # source speaker's sp

    mode = 'infer'
    G = Graph(mode=mode)
    print('{} graph loaded.'.format(mode))
    saver = tf.train.Saver()
    with tf.Session(config=tf.ConfigProto(log_device_placement=False,
                                          allow_soft_placement=True)) as sess:
        try:
            print(f'Try to load trained model in {hp.MODEL_DIR} ...')
            saver.restore(sess, tf.train.latest_checkpoint(hp.MODEL_DIR))
        except:
            raise Exception(
                f'Load trained model failed in {hp.MODEL_DIR}, please check ...'
            )
        finally:
            ori_feat = np.reshape(ori_feat, (-1, hp.CODED_DIM))  # source speaker's sp
            ori_feat_batch = np.expand_dims(ori_feat, axis=0)  # the model is trained on 3-D input
            aim_spkid_batch = np.array([[aim_spkid]])
            for j in tqdm.tqdm(range(1)):
                aim_out = sess.run(G.aim_out, {
                    G.ori_feat: ori_feat_batch,
                    G.aim_spk: aim_spkid_batch
                })

            aim_out = np.array(aim_out, dtype=np.float64)
            predict_sp = np.reshape(aim_out, (-1, hp.CODED_DIM))

            print('Sp predicted done.')
            synthesis(fpath, predict_sp, aim_spkid)
Example #27
                    fields = fields[1:]

                # More than one field
                if len(fields) > 1 or len(fields) == 0:
                    raise Exception('Unable to parse movie title on line ' +
                                    str(idx) + ':\n     ' + repr(fields))

                result = parse_movie_title(fields[0])
                if not result:
                    continue
                movie_title, year = result
                try:
                    m = graph.get_movie(movie_title, year)
                    actor.add_role(m)
                except UnknownMovieException as ex:
                    pass

            except Exception as ex:
                log(repr(debug))
                raise ex


if __name__ == '__main__':
    graph = Graph()
    actor_file, movie_file = sys.argv[1:]
    parse_movies(graph, movie_file)
    parse_actors(graph, actor_file)
    graph.store()
    with open('top250.bin', 'wb') as out:
        pickle.dump(graph, out)
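Because the script above pickles the finished graph to top250.bin, a later run can restore it without re-parsing the movie and actor lists; a minimal sketch, assuming only the standard pickle module.

# Hypothetical reload of the graph pickled above.
import pickle
with open('top250.bin', 'rb') as f:
    graph = pickle.load(f)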
Example #28
from model import Graph
from train import read_data, extract_entity

train_data, dev_data, test_data, id2class, class2id = read_data()
_, model = Graph(0, 0, 0, 0)
model.load_weights('output/subject_model.weights')


def predict(content, cls):
    res = extract_entity(content, cls, class2id, model)
    return res


if __name__ == '__main__':
    while 1:
        content = input('content:')
        cls = input('cls:')
        r = predict(content, cls)
        print(r)
Example #29
def main():
    mode = 'train'
    G = Graph(mode=mode)
    print('{} graph loaded.'.format(mode))
    saver = tf.train.Saver()  # saves the trained model after training, and also restores a saved model in this program
    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=1)  # note: the fraction here is 1
    # https://blog.csdn.net/c20081052/article/details/82345454
    # per_process_gpu_memory_fraction sets an upper bound on the GPU memory each process may use,
    # but it applies uniformly to all GPUs; per-GPU limits cannot be set.
    with tf.Session(config=tf.ConfigProto(
            gpu_options=gpu_options, log_device_placement=False, allow_soft_placement=True)) as sess:
        writer = tf.summary.FileWriter(hp.LOG_DIR, sess.graph)
        """
        # tf.summary.FileWritter(path, sess.graph)
        # 指定一个文件用来保存图。 LOG_DIR = './logs'
        gpu_options:每个GPU使用显存上限(集体定义,不能单独定义)
        log_device_placement=False : 是否打印设备分配日志
        allow_soft_placement=True : 如果你指定的设备不存在,允许TF自动分配设备
        """
        try:
            print(f'Try to load trained model in {hp.MODEL_DIR} ...')
            # MODEL_DIR = './models'
            saver.restore(sess, tf.train.latest_checkpoint(hp.MODEL_DIR))
            # reload the already-trained model into the session so that sess.run(...) can be used directly below
        except:
            print('Load trained model failed, start training with initializer ...')
            sess.run(tf.global_variables_initializer())
        finally:
            # finally runs whether or not the try block raised; even a return statement goes through the finally block first
            # the local_variables_initializer and start_queue_runners calls below are needed because tf.train.batch is used

            sess.run(tf.local_variables_initializer())
            coord = tf.train.Coordinator()
            threads = tf.train.start_queue_runners(coord=coord)
            try:
                while not coord.should_stop():
                    """
                    post-processing of the data shapes could be inserted here
                    """
                    steps = 1
                    G_loss = 0
                    D_loss = 0
                    if steps % 5 != 0:
                        _, reconstruction_loss, cycle_loss, kl_loss, GAN_G_loss, G_loss, summary, steps = sess.run([G.G_train_op,
                                                                                                                    G.reconstruction_loss,
                                                                                                                    G.cycle_loss,
                                                                                                                    G.kl_loss,
                                                                                                                    G.GAN_G_loss,
                                                                                                                    G.G_loss,
                                                                                                                    G.merged,
                                                                                                                    G.global_step])
                        print('train mode \t steps : {} \t '
                              'reconstruction_loss : {} \t '
                              'cycle_loss : {} \t '
                              'kl_loss : {} \t '
                              'GAN_G_loss : {} \t '
                              'G_total_loss : {}'.format(steps,
                                                         reconstruction_loss,
                                                         cycle_loss,
                                                         kl_loss,
                                                         GAN_G_loss,
                                                         G_loss))
                    else:
                        _, D_fake_loss, D_real_loss, D_loss, summary, steps = sess.run([G.D_train_op,
                                                                                        G.D_fake_loss,
                                                                                        G.D_real_loss,
                                                                                        G.D_loss,
                                                                                        G.merged,
                                                                                        G.global_step])
                        print('train mode \t steps : {} \t '
                              'D_fake_loss : {} \t '
                              'D_real_loss : {} \t '
                              'D_total_loss : {}'.format(steps,
                                                         D_fake_loss,
                                                         D_real_loss,
                                                         D_loss))
                    writer.add_summary(summary=summary, global_step=steps)
                    # tf.summary.FileWriter's add_summary() method saves the training data to the file specified by the writer

                    if steps % (hp.PER_STEPS + 1) == 0:
                        saver.save(sess, os.path.join(hp.MODEL_DIR, 'model_%.3fGlos_%.3fDlos_%dsteps' % (G_loss,
                                                                                                         D_loss,
                                                                                                         steps)))

            except tf.errors.OutOfRangeError:
                print('Training Done.')
            finally:
                coord.request_stop()
            coord.join(threads)
Example #30
    # Config default value
    cfg = config.cfg

    # Training files name
    cfg.queue.filename = [
        os.path.join(os.path.dirname(os.path.abspath(__file__)), "examples", "train{}.tfrecords").format(index)
        for index in range(args.train_first_file,
                           args.train_first_file +
                           args.train_set_size //
                           cfg.queue.nb_examples_per_file)]

    print(cfg.queue.filename)

    # Whether we create a validation set
    cfg.queue.is_val_set = args.val_set

    # Whether to train with adversarial cost
    cfg.gan.train_adversarial = args.train_adversarial

    # Size of a batch
    cfg.train.batch_size = args.batch_size

    # Build model and train or fill images
    b = Graph(cfg)
    b.build()
    if args.train:
        b.train()
    else:
        # TODO: add a queue for validation set (change args parameter in consequences)
        b.fill_image(20)