Ejemplo n.º 1
0
def test(request):
    """Delegate the request to the backing helper and wrap the result as JSON.

    :param request: incoming request object, forwarded unchanged
    :return: JSON response built by ``render_json``
    """
    return render_json(function.test(request))
Ejemplo n.º 2
0
def train():
    """Build the training graph and run a monitored training loop.

    Constructs the input pipelines, the inference/loss/train ops, a
    test-accuracy op and a logging hook, then repeatedly evaluates accuracy
    and steps the optimizer until the configured maximum step is reached.
    """
    with tf.Graph().as_default():
        global_step = tf.train.get_or_create_global_step()

        # Input pipelines are pinned to the CPU.
        with tf.device('/cpu:0'):
            images, labels = function.get_inputs(eval_data=False)
            test_images, test_labels = function.get_inputs(eval_data=True)

        # Training path: forward pass, loss, and the optimizer update op.
        logits = function.inference(images)
        loss = function.loss(logits, labels)
        train_op = function.train_op(loss, global_step)

        # Evaluation path: batch accuracy on the test inputs.
        test_logits = function.test(test_images)
        correct_prediction = tf.equal(tf.argmax(test_logits, 1),
                                      tf.argmax(test_labels, 1))
        accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

        class _LoggerHook(tf.train.SessionRunHook):
            """Periodically logs the loss value and training throughput."""

            def begin(self):
                self._step = -1
                self._start_time = time.time()

            def before_run(self, run_context):
                self._step += 1
                # Request the loss tensor so after_run can report it.
                return tf.train.SessionRunArgs(loss)

            def after_run(self, run_context, run_values):
                if self._step % config.FLAGS.log_frequency != 0:
                    return
                now = time.time()
                duration = now - self._start_time
                self._start_time = now

                loss_value = run_values.results
                examples_per_sec = (config.FLAGS.log_frequency *
                                    config.FLAGS.batch_size / duration)
                sec_per_batch = float(duration / config.FLAGS.log_frequency)

                print('%s: step %d, loss = %.2f (%.1f examples/sec; %.3f '
                      'sec/batch)' % (datetime.now(), self._step, loss_value,
                                      examples_per_sec, sec_per_batch))

        session_hooks = [
            tf.train.StopAtStepHook(last_step=config.FLAGS.max_steps),
            tf.train.NanTensorHook(loss),
            _LoggerHook(),
        ]
        with tf.train.MonitoredTrainingSession(hooks=session_hooks) as mon_sess:
            while not mon_sess.should_stop():
                print("acc: %s" % mon_sess.run(accuracy))
                mon_sess.run(train_op)
Ejemplo n.º 3
0
    def forward(self, content, style, alpha=1.0):
        """Run one stylization pass and return the training losses.

        :param content: content image batch.
        :param style: style image batch.
        :param alpha: style/content blend weight in [0, 1]; 1.0 keeps the
            stylized features, 0.0 keeps the raw content features.
        :return: tuple ``(loss_c, loss_s, g_t)`` — content loss, style loss,
            and the decoded (generated) image batch.
        """
        assert 0 <= alpha <= 1
        style_feats = self.encode_with_intermediate(style)
        content_feat = self.encode(content)

        # Transfer style statistics onto the content features, then blend
        # with the raw content features according to alpha.
        # (Originally: t = adain(content_feat, style_feats[-1]).)
        t = test(content_feat, style_feats[-1])
        t = alpha * t + (1 - alpha) * content_feat

        # Decode back to image space, then replicate the single channel to
        # three so the encoder can consume the generated image again.
        g_t = self.decoder(t)
        g_t = g_t.expand(-1, 3, -1, -1)
        g_t_feats = self.encode_with_intermediate(g_t)

        # Content loss against the blended features; style loss summed over
        # the four intermediate encoder layers (0..3, in order).
        loss_c = self.calc_content_loss(g_t_feats[-1], t)
        loss_s = self.calc_style_loss(g_t_feats[0], style_feats[0])
        for layer in range(1, 4):
            loss_s = loss_s + self.calc_style_loss(g_t_feats[layer],
                                                   style_feats[layer])
        return loss_c, loss_s, g_t
Ejemplo n.º 4
0
import function

# Sample words forwarded as positional arguments to function.test.
words = ['Hello ', 'Mother F****r!', 'Hahahaha!!!!']
print(function.test(*words))
Ejemplo n.º 5
0
def main():
    """Entry point for CycleGAN training.

    Builds the generators/discriminators, losses, optimizers, LR schedulers
    and data loaders from ``config``, optionally resumes from a checkpoint,
    then alternates one training and one testing pass per epoch, saving a
    checkpoint every ``CHECKPOINT_INTERVAL`` epochs.
    """
    args = parse_args()

    # cuDNN behaviour comes straight from the experiment config.
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED
    gpus = [int(i) for i in config.GPUS.split(',')]

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'train')

    writer_dict = {
        'writer': SummaryWriter(log_dir=tb_log_dir),
        'train_global_steps': 0,
        'valid_global_steps': 0,
    }

    # Initialize generators and discriminators. Direct attribute access
    # replaces the former eval('models.cyclegan.get_generator') indirection:
    # eval resolved the exact same globals, so this is behaviour-preserving
    # while avoiding the eval anti-pattern.
    G_AB = models.cyclegan.get_generator(config.DATA.IMAGE_SHAPE,
                                         config.NETWORK.NUM_RES_BLOCKS)
    G_BA = models.cyclegan.get_generator(config.DATA.IMAGE_SHAPE,
                                         config.NETWORK.NUM_RES_BLOCKS)
    D_A = models.cyclegan.get_discriminator(config.DATA.IMAGE_SHAPE)
    D_B = models.cyclegan.get_discriminator(config.DATA.IMAGE_SHAPE)
    #logger.info(pprint.pformat(G_AB))
    #logger.info(pprint.pformat(D_A))

    # Wrap every network for multi-GPU execution.
    model_dict = {}
    model_dict['G_AB'] = torch.nn.DataParallel(G_AB, device_ids=gpus).cuda()
    model_dict['G_BA'] = torch.nn.DataParallel(G_BA, device_ids=gpus).cuda()
    model_dict['D_A'] = torch.nn.DataParallel(D_A, device_ids=gpus).cuda()
    model_dict['D_B'] = torch.nn.DataParallel(D_B, device_ids=gpus).cuda()

    # Loss functions: MSE adversarial loss plus L1 cycle/identity terms.
    criterion_dict = {}
    criterion_dict['GAN'] = torch.nn.MSELoss().cuda()
    criterion_dict['cycle'] = torch.nn.L1Loss().cuda()
    criterion_dict['identity'] = torch.nn.L1Loss().cuda()

    # Optimizers: one shared by both generators, one per discriminator.
    optimizer_dict = {}
    optimizer_dict['G'] = get_optimizer(
        config, itertools.chain(G_AB.parameters(), G_BA.parameters()))
    optimizer_dict['D_A'] = get_optimizer(config, D_A.parameters())
    optimizer_dict['D_B'] = get_optimizer(config, D_B.parameters())

    start_epoch = config.TRAIN.START_EPOCH
    if config.TRAIN.RESUME:
        start_epoch, model_dict, optimizer_dict = load_checkpoint(
            model_dict, optimizer_dict, final_output_dir)

    # Learning-rate schedulers, fast-forwarded to the resume epoch so the
    # schedule continues where the checkpoint left off.
    lr_scheduler_dict = {}
    lr_scheduler_dict['G'] = get_lr_scheduler(config, optimizer_dict['G'])
    lr_scheduler_dict['D_A'] = get_lr_scheduler(config, optimizer_dict['D_A'])
    lr_scheduler_dict['D_B'] = get_lr_scheduler(config, optimizer_dict['D_B'])
    for _ in range(start_epoch):
        for lr_scheduler in lr_scheduler_dict.values():
            lr_scheduler.step()

    # Buffers of previously generated samples.
    fake_A_buffer = ReplayBuffer()
    fake_B_buffer = ReplayBuffer()

    # Image transformations.
    transforms_ = [
        #transforms.Resize(int(config.img_height * 1.12), Image.BICUBIC),
        #transforms.RandomCrop((config.img_height, config.img_width)),
        #transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]

    # Datasets.
    logger.info('=> loading train and testing dataset...')

    train_dataset = ImageDataset(config.DATA.TRAIN_DATASET_B,
                                 config.DATA.TRAIN_DATASET,
                                 transforms_=transforms_)
    test_dataset = ImageDataset(config.DATA.TEST_DATASET_B,
                                config.DATA.TEST_DATASET,
                                transforms_=transforms_,
                                mode='test')
    # Training data loader; batch size is scaled by the number of GPUs.
    train_dataloader = DataLoader(train_dataset,
                                  batch_size=config.TRAIN.BATCH_SIZE *
                                  len(gpus),
                                  shuffle=config.TRAIN.SHUFFLE,
                                  num_workers=config.NUM_WORKERS)
    # Test data loader.
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=config.TEST.BATCH_SIZE * len(gpus),
                                 shuffle=False,
                                 num_workers=config.NUM_WORKERS)

    for epoch in range(start_epoch, config.TRAIN.END_EPOCH):

        train(config, epoch, model_dict, fake_A_buffer, fake_B_buffer,
              train_dataloader, criterion_dict, optimizer_dict,
              lr_scheduler_dict, writer_dict)

        test(config, model_dict, test_dataloader, criterion_dict,
             final_output_dir)

        for lr_scheduler in lr_scheduler_dict.values():
            lr_scheduler.step()

        if config.TRAIN.CHECKPOINT_INTERVAL != -1 and epoch % config.TRAIN.CHECKPOINT_INTERVAL == 0:
            logger.info('=> saving checkpoint to {}'.format(final_output_dir))
            save_checkpoint(
                {
                    'epoch': epoch + 1,
                    'model': 'cyclegan',
                    'state_dict_G_AB': model_dict['G_AB'].module.state_dict(),
                    'state_dict_G_BA': model_dict['G_BA'].module.state_dict(),
                    'state_dict_D_A': model_dict['D_A'].module.state_dict(),
                    'state_dict_D_B': model_dict['D_B'].module.state_dict(),
                    'optimizer_G': optimizer_dict['G'].state_dict(),
                    'optimizer_D_A': optimizer_dict['D_A'].state_dict(),
                    'optimizer_D_B': optimizer_dict['D_B'].state_dict(),
                }, final_output_dir)

    writer_dict['writer'].close()
Ejemplo n.º 6
0
class PredFunc(object):
    """Per-class Gaussian naive-Bayes scorer.

    Stores one class's per-feature mean and standard deviation plus its
    prior, and scores a sample as the product of the independent Gaussian
    densities times the prior.
    """

    def __init__(self, miu, sigma, Pc, label):
        self.miu = miu      # per-feature mean vector of the class
        self.sigma = sigma  # per-feature standard deviation vector
        self.label = label  # class label this scorer represents
        self.Pc = Pc        # class prior P(c)

    def cal(self, x):
        """Return ``P(c) * prod_i N(x_i; miu_i, sigma_i**2)`` for sample x."""
        # Exact sqrt(2*pi) normalisation replaces the former hard-coded
        # 2.507 approximation.
        return self.Pc * np.prod(
            np.exp(-(x - self.miu)**2 / (2 * self.sigma**2)) /
            (np.sqrt(2 * np.pi) * self.sigma))


def train(trainData, trainLabel):
    """Fit one Gaussian naive-Bayes scorer (PredFunc) per class.

    :param trainData: training samples.
    :param trainLabel: labels aligned with ``trainData``.
    :return: list of ``PredFunc``, one per class.
    """
    func = []
    num = len(trainLabel)
    newData, newLabel = F.divide(trainData, trainLabel)
    for C, y in zip(newData, newLabel):
        Pc = len(C) / num            # class prior
        miu = np.average(C, axis=0)  # per-feature mean
        # Bug fix: PredFunc.cal squares sigma as a standard deviation, but
        # np.var already returns the variance — use the std instead.
        sigma = np.std(C, axis=0)
        func.append(PredFunc(miu, sigma, Pc, y))
    return func


# Load the train (0) and test (1) splits; the third return value is unused.
# NOTE(review): `normalize` looks like a preprocessing flag defined
# elsewhere in this module — confirm against F.load's signature.
trainData, trainLabel, _ = F.load(0, normalize)
testData, testLabel, _ = F.load(1, normalize)
# Fit one Gaussian scorer per class, then report test accuracy.
func = train(trainData, trainLabel)
acc = F.test(testData, testLabel, func)
print(acc)
Ejemplo n.º 7
0
def main():
    """Entry point for CycleGAN evaluation.

    Rebuilds the networks, loads trained weights from a checkpoint, and runs
    a single testing pass that writes the generated images to the output
    directory.
    """
    args = parse_args()

    # cuDNN behaviour comes straight from the experiment config.
    cudnn.benchmark = config.CUDNN.BENCHMARK
    torch.backends.cudnn.deterministic = config.CUDNN.DETERMINISTIC
    torch.backends.cudnn.enabled = config.CUDNN.ENABLED
    gpus = [int(i) for i in config.GPUS.split(',')]

    logger, final_output_dir, tb_log_dir = create_logger(
        config, args.cfg, 'test')

    # Initialize generators and discriminators. Direct attribute access
    # replaces the former eval('models.cyclegan...') indirection: eval
    # resolved the exact same globals, so this is behaviour-preserving
    # while avoiding the eval anti-pattern.
    G_AB = models.cyclegan.get_generator(config.DATA.IMAGE_SHAPE,
                                         config.NETWORK.NUM_RES_BLOCKS)
    G_BA = models.cyclegan.get_generator(config.DATA.IMAGE_SHAPE,
                                         config.NETWORK.NUM_RES_BLOCKS)
    D_A = models.cyclegan.get_discriminator(config.DATA.IMAGE_SHAPE)
    D_B = models.cyclegan.get_discriminator(config.DATA.IMAGE_SHAPE)
    #logger.info(pprint.pformat(G_AB))
    #logger.info(pprint.pformat(D_A))

    # Wrap every network for multi-GPU execution.
    model_dict = {}
    model_dict['G_AB'] = torch.nn.DataParallel(G_AB, device_ids=gpus).cuda()
    model_dict['G_BA'] = torch.nn.DataParallel(G_BA, device_ids=gpus).cuda()
    model_dict['D_A'] = torch.nn.DataParallel(D_A, device_ids=gpus).cuda()
    model_dict['D_B'] = torch.nn.DataParallel(D_B, device_ids=gpus).cuda()

    # Loss functions: MSE adversarial loss plus L1 cycle/identity terms.
    criterion_dict = {}
    criterion_dict['GAN'] = torch.nn.MSELoss().cuda()
    criterion_dict['cycle'] = torch.nn.L1Loss().cuda()
    criterion_dict['identity'] = torch.nn.L1Loss().cuda()

    if config.TEST.MODEL_FILE:
        _, model_dict, _ = load_checkpoint(model_dict, {},
                                           final_output_dir,
                                           is_train=False)
    else:
        # A missing model file is a fatal misconfiguration; raise instead of
        # the former `assert 0`, which disappears under `python -O`.
        logger.info('[error] no model file specified')
        raise ValueError('no model file specified')

    # Buffers of previously generated samples.
    # NOTE(review): unused in this test path — kept in case ReplayBuffer()
    # construction matters elsewhere; confirm and remove if not.
    fake_A_buffer = ReplayBuffer()
    fake_B_buffer = ReplayBuffer()

    # Image transformations.
    transforms_ = [
        #transforms.Resize(int(config.img_height * 1.12), Image.BICUBIC),
        #transforms.RandomCrop((config.img_height, config.img_width)),
        #transforms.RandomHorizontalFlip(),
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ]

    # Dataset.
    logger.info('=> loading testing dataset...')

    test_dataset = ImageDataset(config.DATA.TEST_DATASET_B,
                                config.DATA.TEST_DATASET,
                                transforms_=transforms_,
                                mode='test')

    # Test data loader; batch size is scaled by the number of GPUs.
    test_dataloader = DataLoader(test_dataset,
                                 batch_size=config.TEST.BATCH_SIZE * len(gpus),
                                 shuffle=False,
                                 num_workers=config.NUM_WORKERS)

    test(config, model_dict, test_dataloader, criterion_dict, final_output_dir)
    logger.info('=> finished testing, saving generated images to {}'.format(
        final_output_dir + '/images'))
Ejemplo n.º 8
0
from function import test

# Evaluate the helper on a sample input and show the result.
result = test(50)
print(result)
Ejemplo n.º 9
0
import test
import function
import embedding

if __name__ == '__main__':
    # Ask the user which stage to run, then dispatch to the matching entry
    # point. Unknown answers fall through and do nothing, as before.
    order = input('choose embedding/train/test\n')
    if order == 'embedding':
        embedding.build_embed()
    elif order in ('train', 'test'):
        # Lazy attribute lookup keeps the original behaviour of only
        # touching the chosen callable.
        getattr(function, order)()
Ejemplo n.º 10
0
def run():

   
    avg_time=test()##平均一个线程花费的时间
    logging.info(u'from function test get avg_time=%s'%avg_time)
    print u'u'from function test get avg_time=%s'%avg_time'
    count=0
    
    while True:
        try:
            logging.info(u'max wait 100 second try to get element from queue')
            d=song_info_queue.get(timeout=100)
            logging.info(u'get %s from queue'%d)

            msi=Multi_Song_Info(song_ids=d['ids'],refer=d['refer'])
            pool.submit(msi.insert_song)

            logging.info(u'%s start new threading, get song ids=%s,threading name is:%s,pid is:%s'%(sys._getframe().f_code.co_name,d['ids'],threading.current_thread().name,os.getpid()))
            count+=1

            if count>=max_pool*multiple:
                sleeptime=multiple*avg_time
                print u'generate 60 threading,so sleep %s second! current active threading num=%s'%(sleeptime,threading.active_count())
                time.sleep(sleeptime)
                count=0
            '''
            系统每创建60个线程就会休息60s,因为线程池的大小为21个,所以同时会运行21个线程,假如每个线程运行时间差不多,每个线程运行时间需要平均3s,
            那么大概9秒的时间,60个线程就会全部运行完毕。所以大概等待9秒左右,能够最大程度的利用系统资源。避免了过多等待时间。
            sleeptime=count最大值/线程池大小*平均每个线程运行时间(sleeptime=60/21*3=9)
            网络环境良好的环境下,数据库线程数很小(宿舍测试max_pool=11,count=22,timesleep=20的情况下,数据库线程数稳定在22)
            网络良好的情况下,sleeptime=30,max_count=60,max_pool=21时,mysql status线程数稳定在30-70左右。(在)
            网络良好的情况下,sleeptime=10,max_count=60,max_pool=21时,mysql status线程数越跑越多。运行5分钟后,线程会跑到500左右
            
            线程平均运行时间最大的影响因素在于网络流畅程度。网络状态良好的话,sleeptime设置20是没问题的。
            如果主线程不sleep的话,创建很多等待的线程,这些线程不能利用到连接池的优势,即一个数据库连接可以被不同的线程利用多次。
            因为数据库连接池中的每一个连接都被等待中的线程占用了。

            '''

        except Exception,e:              
            if str(e):
                e=str(e)
            else:##queue raise error e , str(e)为空
                e='queue empty'

            logging.warn(u' function %s raise  error cause by %s,traceback info is:%s '%(sys._getframe().f_code.co_name,e,traceback.format_exc()))
            print u'error info is:%s'%e

            if 'many connections' in e:##最好使用joinablequeue
                print u'current too many connections,sleep 3 second wait runing connections close'
                song_info_queue.put(d)
                print u'catch too many connections error ,so put d=%s back into queue'%d
                logging.info(u'catch too many connections error ,so put d=%s back into queue'%d)
                ##发生异常在于数据库操作,d的值可以获取到,所以把他重新放回queue中,所以不需要joinablequeue了
                time.sleep(3)

                continue
            else:
                print u'empty queue or other unknown error,so break loop!'
                print u'wait 20 second ensure runing threading done'
                time.sleep(20)
                break