Example #1
def model_process(model_name, model_dir, in_options, sq, cq):
    try:
        model_path = Path(model_dir)

        import models
        model = models.import_model(model_name)(model_path, **in_options)
        converter = model.get_converter(**in_options)
        converter.dummy_predict()

        cq.put({
            'op': 'init',
            'converter': converter.copy_and_set_predictor(None)
        })

        closing = False
        while not closing:
            while not sq.empty():
                obj = sq.get()
                obj_op = obj['op']
                if obj_op == 'predict':
                    result = converter.predictor(obj['face'])
                    cq.put({'op': 'predict_result', 'result': result})
                elif obj_op == 'close':
                    closing = True
                    break
            time.sleep(0.005)

        model.finalize()

    except Exception as e:
        print('Error: %s' % (str(e)))
        traceback.print_exc()
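The worker above talks to its parent only through the two queues: it first sends an 'init' message carrying a predictor-less copy of the converter, answers each 'predict' request with a 'predict_result', and stops on 'close'. Below is a minimal sketch (not part of the original source) of how a parent process might drive that protocol; the multiprocessing wiring and the face_image argument are assumptions.

import multiprocessing

def run_model_worker(model_name, model_dir, in_options, face_image):
    # hypothetical driver for model_process() above
    sq = multiprocessing.Queue()  # requests to the worker
    cq = multiprocessing.Queue()  # replies from the worker
    p = multiprocessing.Process(target=model_process,
                                args=(model_name, model_dir, in_options, sq, cq))
    p.start()

    converter = cq.get()['converter']            # blocks until the 'init' message arrives
    sq.put({'op': 'predict', 'face': face_image})
    result = cq.get()['result']                   # the matching 'predict_result'

    sq.put({'op': 'close'})                       # worker runs model.finalize() and exits
    p.join()
    return converter, result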
def evaluate(print_grid=False):
    with tf.device('/gpu:0'): # run on specific device
        input_tensor, pred, gt = models.import_model(num_timesteps,
                                                     num_feats,
                                                     batch_size)

    dataset = data_loader.read_datasets(PREPROCESSED_DATA, dataset_type='test')

    saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver.restore(sess, model_path)

        all_pred, all_gt = [], []
        for i in range(updates_per_epoch):
            input_batch, gt_batch = dataset.next_batch(batch_size)
            pred_value = sess.run([pred],
                                  {input_tensor : input_batch,
                                   gt : [gt_batch]})

            all_pred.append(pred_value)
            all_gt.append(gt_batch)

        num_align = 0
        rmse = []
        for i in range(len(all_pred)):
            if all_pred[i] == all_gt[i]: num_align += 1
            rmse.append(np.sqrt(np.power((all_pred[i] - all_gt[i]), 2)))

        print "Accuracy:", float(num_align)/len(all_pred)
        print "Avg. RMSE", np.mean(rmse)
        print "Variance RMSE", np.var(rmse)
Example #3
def model_process(stdin_fd, model_name, model_dir, in_options, sq, cq):
    sys.stdin = os.fdopen(stdin_fd)

    try:
        model_path = Path(model_dir)

        import models
        model = models.import_model(model_name)(model_path, **in_options)
        converter = model.get_converter(**in_options)
        converter.dummy_predict()

        cq.put({
            'op': 'init',
            'converter': converter.copy_and_set_predictor(None)
        })

        while True:
            while not sq.empty():
                obj = sq.get()
                obj_op = obj['op']
                if obj_op == 'predict':
                    result = converter.predictor(obj['face'])
                    cq.put({'op': 'predict_result', 'result': result})
            time.sleep(0.005)
    except Exception as e:
        print('Error: %s' % (str(e)))
        traceback.print_exc()
Example #4
def extrapolate(history_file, etc_dict=None):
    global saver
    global sess
    # etc_dict is a dict mapping from province to # of new ETCs there
    with tf.device('/gpu:0'):  # run on specific device
        input_tensor, pred, gt = models.import_model(num_timesteps,
                                                     num_feats,
                                                     batch_size)

    # dataset should be [num_provinces x (num_timesteps, num_feats)]
    data, provinces = np.load(history_file)

    saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver.restore(sess, model_path)

        all_extrapolated = defaultdict(list)
        all_new_values = defaultdict(list)
        for province, province_data in zip(provinces, data):
            # for one province
            # get lat and lon
            lat, lon = province_data[0, 1:]
            extrapolated = []
            new_values = []
            old_value = province_data[-1, 0]
            for t in range(num_extrapolate):
                pred_value = sess.run([pred],
                                      {input_tensor: province_data})[0][0][0]
                if pred_value < 0:
                    pred_value = 0
                extrapolated.append(pred_value)

                new_value = pred_value
                if etc_dict and province in etc_dict:
                    new_value = old_value + ((pred_value - old_value) * (1/(etc_dict[province] + 3)))
                    # new_value *= (1 - etc_dict[province] * 0.1)
                old_value = pred_value
                new_values.append(new_value)
                new_sample = np.array([new_value, lat, lon])

                new_sample = np.reshape(new_sample, (1, -1))
                province_data = province_data[1:, :]
                province_data = np.concatenate((province_data, new_sample), axis=0)
                # make example with [pred_value, lat, lon]
                # remove first element in input batch and add extrapolated
            all_extrapolated[province] = extrapolated
            all_new_values[province] = new_values


    for i, province in enumerate(provinces):
        print(province)
        print(data[i])
        print(all_extrapolated[province])
        print(all_new_values[province])
    # np.save('all_extrapolated', all_extrapolated)

    return all_extrapolated
def train():
    with tf.device('/gpu:0'): # run on specific device
        input_tensor, pred, gt = models.import_model(num_timesteps,
                                                     num_feats,
                                                     batch_size)
        loss = get_loss(pred, gt)
        optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=1.0)
        train = optimizer.minimize(loss=loss)

    dataset = data_loader.read_datasets(PREPROCESSED_DATA)
    saver = tf.train.Saver()  # defaults to saving all variables

    # logging the loss function
    loss_placeholder = tf.placeholder(tf.float32)
    tf.scalar_summary('train_loss', loss_placeholder)

    merged = tf.merge_all_summaries()

    init = tf.initialize_all_variables()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        writer = tf.train.SummaryWriter(os.path.join(working_directory, 'logs'),
                sess.graph_def)
        sess.run(init)

        for epoch in range(max_epoch):
            training_loss = 0.0

            widgets = ["epoch #%d|" % epoch, Percentage(), Bar(), ETA()]
            pbar = ProgressBar(updates_per_epoch, widgets=widgets)
            pbar.start()
            for i in range(updates_per_epoch):
                pbar.update(i)
                input_batch, gt_batch = dataset.next_batch(batch_size)
                _, loss_value = sess.run([train, loss],
                                         {input_tensor : input_batch,
                                          gt : [gt_batch]})
                training_loss += np.sum(loss_value)

            training_loss = training_loss/(updates_per_epoch)
            print("Loss %f" % training_loss)

            # save model
            if epoch % save_frequency == 0:
                checkpoints_folder = os.path.join(working_directory, 'checkpoints')
                if not os.path.exists(checkpoints_folder):
                    os.makedirs(checkpoints_folder)
                saver.save(sess, os.path.join(checkpoints_folder, 'model.ckpt'),
                           global_step=epoch)

                # save summaries
                summary_str = sess.run(merged,
                              feed_dict={input_tensor : input_batch,
                                         gt : [gt_batch],
                                         loss_placeholder: training_loss})
                writer.add_summary(summary_str, global_step=epoch)
        writer.close()
def evaluate(print_grid=False):
    data_paths = [
        conflict_data_file, climate_data_file, poverty_grid_file,
        poverty_mask_file
    ]
    dataset, conflict_mask, poverty_grid, poverty_mask = data_loader.read_datasets(
        data_paths, dataset_type='test')
    with tf.device('/gpu:0'):  # run on specific device
        conflict_grids, climate_grids, pov_grid, pred, gt = models.import_model(
            num_timesteps, input_size, poverty_grid.shape, input_size)

    saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver.restore(sess, model_path)

        all_pred, all_gt = [], []
        for i in range(updates_per_epoch):
            conflict_grids_batch, gt_batch, climate_grids_batch = \
                                    dataset.next_batch(batch_size)
            pred_value = sess.run(
                [pred], {
                    conflict_grids: conflict_grids_batch,
                    climate_grids: climate_grids_batch,
                    pov_grid: poverty_grid,
                    gt: gt_batch
                })

            mask = conflict_mask * poverty_mask
            pred_value = pred_value * mask
            to_remove_idxs = np.where(mask.flatten() < 1)
            pred_value = np.delete(pred_value.flatten(), to_remove_idxs)
            gt_batch = np.delete(gt_batch.flatten(), to_remove_idxs)
            assert (len(pred_value) == len(gt_batch))

            for k in range(len(pred_value)):
                all_pred.append(pred_value[k])
                all_gt.append(gt_batch[k])

            if print_grid:
                np.set_printoptions(precision=1, linewidth=150, suppress=True)
                print('-' * 80)
                print(np.squeeze(pred_value))
                print(np.squeeze(gt_batch))

        get_stats(all_pred, all_gt)

        print "Collecting stats for random predictions"
        all_random = np.random.randint(0, 2, (len(all_pred)))
        get_stats(all_random, all_gt)
def init_model():
    global _saver
    global _sess
    global _input_tensor
    global _pred
    global _gt


    num_timesteps = 25
    num_feats = 3
    batch_size = 1

    _input_tensor, _pred, _gt = models.import_model(num_timesteps,
                                                 num_feats,
                                                 batch_size)
    _saver = tf.train.Saver()
    _sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))
    _saver.restore(_sess, model_path)
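init_model() only builds the graph and restores the checkpoint into module-level globals; the inference call itself is not shown. A hedged sketch of such a call, mirroring the sess.run([pred], {input_tensor: ...}) pattern used in evaluate() and extrapolate() above (the helper name and the exact input shape are assumptions):

def predict_single(history_window):
    # hypothetical helper, not in the original source:
    # history_window is assumed to have shape (num_timesteps, num_feats) = (25, 3)
    pred_value = _sess.run([_pred], {_input_tensor: history_window})[0]
    return pred_value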
def evaluate(print_grid=False):
    data_paths = [conflict_data_file, poverty_grid_file, poverty_mask_file]
    dataset, conflict_mask, poverty_grid, poverty_mask = data_loader.read_datasets(data_paths, dataset_type='test')
    with tf.device('/gpu:0'): # run on specific device
        conflict_grids, pov_grid, pred, gt = models.import_model(num_timesteps,
                                                                 input_size,
                                                                 poverty_grid.shape)

    saver = tf.train.Saver() 
    
    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver.restore(sess, model_path)

        all_pred, all_gt = [], []
        for i in range(updates_per_epoch):
            conflict_grids_batch, gt_batch = \
                                    dataset.next_batch(batch_size)
            pred_value = sess.run([pred],
                                  {conflict_grids : conflict_grids_batch,
                                   pov_grid : poverty_grid,
                                   gt : gt_batch})

            mask = conflict_mask * poverty_mask
            pred_value = pred_value * mask
            to_remove_idxs = np.where(mask.flatten() < 1)
            pred_value = np.delete(pred_value.flatten(), to_remove_idxs)
            gt_batch = np.delete(gt_batch.flatten(), to_remove_idxs)
            assert(len(pred_value) == len(gt_batch))

            for k in range(len(pred_value)):
                all_pred.append(pred_value[k])
                all_gt.append(gt_batch[k])

            if print_grid:
                np.set_printoptions(precision=1, linewidth=150, suppress=True)
                print('-' * 80)
                print(np.squeeze(pred_value))
                print(np.squeeze(gt_batch))
        
        get_stats(all_pred, all_gt)

        print "Collecting stats for random predictions"
        all_random = np.random.randint(0, 2, (len(all_pred)))
        get_stats(all_random, all_gt)
Example #9
    def __init__(self, model, vr):
        self.model_name = model
        self.workers = 8

        nor = False
        if model in ['EfficientNet']:
            nor = True

        self.data_train = dataset(subset='Train',
                                  valid_ratio=vr,
                                  normalize=nor)
        self.data_valid = dataset(subset='Valid',
                                  valid_ratio=vr,
                                  normalize=nor)
        self.data_test = dataset(subset='Test', normalize=nor)

        print(self.data_train)
        print(self.data_test)

        self.model = import_model(model)
        Log.log(Log.INFO, f'Running on model [ {model} ].')

        self.data_loader_train = torch.utils.data.DataLoader(
            dataset=self.data_train,
            batch_size=self.model.batch_size,
            shuffle=True,
            num_workers=self.workers)
        self.data_loader_valid = torch.utils.data.DataLoader(
            dataset=self.data_valid,
            batch_size=self.model.batch_size,
            shuffle=False,
            num_workers=self.workers)
        self.data_loader_test = torch.utils.data.DataLoader(
            dataset=self.data_test,
            batch_size=self.model.batch_size,
            shuffle=False,
            num_workers=0)

        self.device = self.get_available_device()
        dataset.clear()

        if not os.path.exists(os.path.join(self.log_path, model)):
            os.makedirs(os.path.join(self.log_path, model))
        self.full_log_path = os.path.join(self.log_path, model, 'log_data.csv')
Example #10
def trainerThread(s2c,
                  c2s,
                  e,
                  model_class_name=None,
                  saved_models_path=None,
                  training_data_src_path=None,
                  training_data_dst_path=None,
                  pretraining_data_path=None,
                  pretrained_model_path=None,
                  no_preview=False,
                  force_model_name=None,
                  force_gpu_idxs=None,
                  cpu_only=None,
                  silent_start=False,
                  execute_programs=None,
                  debug=False,
                  **kwargs):
    while True:
        try:
            start_time = time.time()

            save_interval_min = 15

            if not training_data_src_path.exists():
                training_data_src_path.mkdir(exist_ok=True, parents=True)

            if not training_data_dst_path.exists():
                training_data_dst_path.mkdir(exist_ok=True, parents=True)

            if not saved_models_path.exists():
                saved_models_path.mkdir(exist_ok=True, parents=True)

            model = models.import_model(model_class_name)(
                is_training=True,
                saved_models_path=saved_models_path,
                training_data_src_path=training_data_src_path,
                training_data_dst_path=training_data_dst_path,
                pretraining_data_path=pretraining_data_path,
                pretrained_model_path=pretrained_model_path,
                no_preview=no_preview,
                force_model_name=force_model_name,
                force_gpu_idxs=force_gpu_idxs,
                cpu_only=cpu_only,
                silent_start=silent_start,
                debug=debug,
            )

            is_reached_goal = model.is_reached_iter_goal()

            shared_state = {'after_save': False}
            loss_string = ""
            save_iter = model.get_iter()

            def model_save():
                if not debug and not is_reached_goal:
                    io.log_info("保存....", end='\r')
                    model.save()
                    shared_state['after_save'] = True

            def model_backup():
                if not debug and not is_reached_goal:
                    model.create_backup()

            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    c2s.put({
                        'op': 'show',
                        'previews': previews,
                        'iter': model.get_iter(),
                        'loss_history': model.get_loss_history().copy()
                    })
                else:
                    previews = [('debug, press update for new',
                                 model.debug_one_iter())]
                    c2s.put({'op': 'show', 'previews': previews})
                e.set()  #Set the GUI Thread as Ready

            if model.get_target_iter() != 0:
                if is_reached_goal:
                    io.log_info('Model already trained to target iteration. You can use preview.')
                else:
                    io.log_info('Starting. Target iteration: %d. Press "Enter" to stop training and save model.' %
                                (model.get_target_iter()))
            else:
                io.log_info(
                    "\n Various cost-effective, pre-tuned models available\n Contact QQ: 395267954\n=============================================\n"
                )
                io.log_info(
                    'Starting. Press "Enter" to stop training and save model.\n\nSave time|Iteration|Iter time|Src loss|Dst loss')
            last_save_time = time.time()

            execute_programs = [[x[0], x[1], time.time()]
                                for x in execute_programs]

            for i in itertools.count(0, 1):
                if not debug:
                    cur_time = time.time()

                    for x in execute_programs:
                        prog_time, prog, last_time = x
                        exec_prog = False
                        if prog_time > 0 and (cur_time -
                                              start_time) >= prog_time:
                            x[0] = 0
                            exec_prog = True
                        elif prog_time < 0 and (cur_time -
                                                last_time) >= -prog_time:
                            x[2] = cur_time
                            exec_prog = True

                        if exec_prog:
                            try:
                                exec(prog)
                            except Exception as e:
                                print("Unable to execute program: %s" % (prog))

                    if not is_reached_goal:

                        if model.get_iter() == 0:
                            io.log_info("")
                            io.log_info("正在尝试运行第一个迭代. 如果出现错误, 请降低参数配置")
                            io.log_info("")

                        iter, iter_time = model.train_one_iter()

                        loss_history = model.get_loss_history()
                        time_str = time.strftime("[%H:%M:%S]")
                        if iter_time >= 10:
                            loss_string = "{0}[#{1:06d}][{2:.5s}s]".format(
                                time_str, iter, '{:0.4f}'.format(iter_time))
                        else:
                            loss_string = "{0}[#{1:06d}][{2:04d}ms]".format(
                                time_str, iter, int(iter_time * 1000))

                        if shared_state['after_save']:
                            shared_state['after_save'] = False

                            mean_loss = np.mean(loss_history[save_iter:iter],
                                                axis=0)

                            for loss_value in mean_loss:
                                loss_string += "[%.4f]" % (loss_value)

                            io.log_info(loss_string)

                            save_iter = iter
                        else:
                            for loss_value in loss_history[-1]:
                                loss_string += "[%.4f]" % (loss_value)

                            if io.is_colab():
                                io.log_info('\r' + loss_string, end='')
                            else:
                                io.log_info(loss_string, end='\r')

                        if model.get_iter() == 1:
                            model_save()

                        if model.get_target_iter(
                        ) != 0 and model.is_reached_iter_goal():
                            io.log_info('Reached target iteration.')
                            model_save()
                            is_reached_goal = True
                            io.log_info('You can use preview now.')

                if not is_reached_goal and (time.time() - last_save_time
                                            ) >= save_interval_min * 60:
                    last_save_time += save_interval_min * 60
                    model_save()
                    send_preview()

                if i == 0:
                    if is_reached_goal:
                        model.pass_one_iter()
                    send_preview()

                if debug:
                    time.sleep(0.005)

                while not s2c.empty():
                    input = s2c.get()
                    op = input['op']
                    if op == 'save':
                        model_save()
                    elif op == 'backup':
                        model_backup()
                    elif op == 'preview':
                        if is_reached_goal:
                            model.pass_one_iter()
                        send_preview()
                    elif op == 'close':
                        model_save()
                        i = -1
                        break

                if i == -1:
                    break

            model.finalize()

        except Exception as e:
            print('Error: %s' % (str(e)))
            traceback.print_exc()
        break
    c2s.put({'op': 'close'})
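On the other end of the s2c/c2s queues, a main or GUI thread starts the trainer, waits on the event e until the first preview has been sent, and forwards user commands as op messages ('save', 'backup', 'preview', 'close'). A rough sketch of that side, assuming plain threading and standard queues (the launcher name and wiring are assumptions, not the original code):

import threading
import queue

def start_trainer(**trainer_kwargs):
    s2c = queue.Queue()    # commands: main/GUI -> trainer
    c2s = queue.Queue()    # previews and status: trainer -> main/GUI
    e = threading.Event()  # set by the trainer once the first preview is ready

    t = threading.Thread(target=trainerThread, args=(s2c, c2s, e), kwargs=trainer_kwargs)
    t.start()
    e.wait()                    # block until the first 'show' message has been queued

    s2c.put({'op': 'preview'})  # request a fresh preview
    s2c.put({'op': 'close'})    # save and shut down

    while True:
        msg = c2s.get()
        if msg['op'] == 'close':
            break
        # msg['op'] == 'show' carries 'previews', 'iter' and 'loss_history'
    t.join()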
Example #11
def trainerThread(s2c, c2s, args, device_args):
    while True:
        try:
            training_data_src_path = Path(args.get('training_data_src_dir',
                                                   ''))
            training_data_dst_path = Path(args.get('training_data_dst_dir',
                                                   ''))
            model_path = Path(args.get('model_path', ''))
            model_name = args.get('model_name', '')
            save_interval_min = 15
            debug = args.get('debug', '')

            if not training_data_src_path.exists():
                io.log_err('Training data src directory does not exist.')
                break

            if not training_data_dst_path.exists():
                io.log_err('Training data dst directory does not exist.')
                break

            if not model_path.exists():
                model_path.mkdir(exist_ok=True)

            model = models.import_model(model_name)(
                model_path,
                training_data_src_path=training_data_src_path,
                training_data_dst_path=training_data_dst_path,
                debug=debug,
                device_args=device_args)

            is_reached_goal = model.is_reached_iter_goal()
            is_upd_save_time_after_train = False
            loss_string = ""

            def model_save():
                # nonlocal is needed so the flag set here is visible in the training loop below
                nonlocal is_upd_save_time_after_train
                if not debug and not is_reached_goal:
                    io.log_info("Saving....", end='\r')
                    model.save()
                    io.log_info(loss_string)
                    is_upd_save_time_after_train = True

            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    c2s.put({
                        'op': 'show',
                        'previews': previews,
                        'iter': model.get_iter(),
                        'loss_history': model.get_loss_history().copy()
                    })
                else:
                    previews = [('debug, press update for new',
                                 model.debug_one_iter())]
                    c2s.put({'op': 'show', 'previews': previews})

            if model.is_first_run():
                model_save()

            if model.get_target_iter() != 0:
                if is_reached_goal:
                    io.log_info(
                        'Model already trained to target iteration. You can use preview.'
                    )
                else:
                    io.log_info(
                        'Starting. Target iteration: %d. Press "Enter" to stop training and save model.'
                        % (model.get_target_iter()))
            else:
                io.log_info(
                    'Starting. Press "Enter" to stop training and save model.')

            last_save_time = time.time()

            for i in itertools.count(0, 1):
                if not debug:
                    if not is_reached_goal:
                        loss_string = model.train_one_iter()
                        if is_upd_save_time_after_train:
                            is_upd_save_time_after_train = False
                            #save resets plaidML programs, so upd last_save_time only after plaidML rebuild them
                            last_save_time = time.time()

                        io.log_info(loss_string, end='\r')
                        if model.get_target_iter(
                        ) != 0 and model.is_reached_iter_goal():
                            io.log_info('Reached target iteration.')
                            model_save()
                            is_reached_goal = True
                            io.log_info('You can use preview now.')

                if not is_reached_goal and (time.time() - last_save_time
                                            ) >= save_interval_min * 60:
                    last_save_time = time.time()
                    model_save()
                    send_preview()

                if i == 0:
                    if is_reached_goal:
                        model.pass_one_iter()
                    send_preview()

                if debug:
                    time.sleep(0.005)

                while not s2c.empty():
                    input = s2c.get()
                    op = input['op']
                    if op == 'save':
                        model_save()
                    elif op == 'preview':
                        if is_reached_goal:
                            model.pass_one_iter()
                        send_preview()
                    elif op == 'close':
                        model_save()
                        i = -1
                        break

                if i == -1:
                    break

            model.finalize()

        except Exception as e:
            print('Error: %s' % (str(e)))
            traceback.print_exc()
        break
    c2s.put({'op': 'close'})
Example #12
def main (input_dir, output_dir, aligned_dir, model_dir, model_name, **in_options):
    print ("Running converter.\r\n")
    
    try:
        input_path = Path(input_dir)
        output_path = Path(output_dir)
        aligned_path = Path(aligned_dir)
        model_path = Path(model_dir)
        
        if not input_path.exists():
            print('Input directory not found. Please ensure it exists.')
            return

        if output_path.exists():
            for filename in Path_utils.get_image_paths(output_path):
                Path(filename).unlink()
        else:
            output_path.mkdir(parents=True, exist_ok=True)
            
        if not aligned_path.exists():
            print('Aligned directory not found. Please ensure it exists.')
            return
            
        if not model_path.exists():
            print('Model directory not found. Please ensure it exists.')
            return

        import models 
        model = models.import_model(model_name)(model_path, **in_options)
        converter = model.get_converter(**in_options)
        
        input_path_image_paths = Path_utils.get_image_paths(input_path)
        aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
        
        alignments = {}
        for filename in tqdm(aligned_path_image_paths, desc= "Collecting alignments" ):
            a_png = AlignedPNG.load( str(filename) )
            if a_png is None:
                print ( "%s - no embedded data found." % (filename) )
                continue
            d = a_png.getFaceswapDictData()
            if d is None or d['source_filename'] is None or d['source_rect'] is None or d['source_landmarks'] is None:
                print ( "%s - no embedded data found." % (filename) )
                continue
            
            source_filename_stem = Path(d['source_filename']).stem
            if source_filename_stem not in alignments.keys():
                alignments[ source_filename_stem ] = []

            alignments[ source_filename_stem ].append ( np.array(d['source_landmarks']) )

        
        for filename in tqdm( input_path_image_paths, desc="Converting"):
            filename_path = Path(filename)
            output_filename_path = output_path / filename_path.name
         
            if filename_path.stem not in alignments.keys():                        
                if not model.is_debug():
                    print ( 'no faces found for %s, copying without faces' % (filename_path.name) )                
                    shutil.copy ( str(filename_path), str(output_filename_path) )                
            else:                    
                image = (cv2.imread(filename) / 255.0).astype('float32')
                faces = alignments[filename_path.stem]
                for image_landmarks in faces:                
                    image = converter.convert(image, image_landmarks, model.is_debug()) 
        
                    if model.is_debug():
                        for img in image:
                            cv2.imshow ('Debug convert', img )
                            cv2.waitKey(0)
                
                if not model.is_debug():
                    cv2.imwrite (str(output_filename_path), (image*255).astype(np.uint8) )
        
        model.finalize()
    except Exception as e:
        print ( 'Error: %s' % (str(e)))
        traceback.print_exc()
Example #13
def trainerThread (input_queue, output_queue, training_data_src_dir, training_data_dst_dir, model_path, model_name, save_interval_min=10, debug=False, **in_options):

    while True:
        try: 
            training_data_src_path = Path(training_data_src_dir)
            training_data_dst_path = Path(training_data_dst_dir)
            model_path = Path(model_path)
            
            if not training_data_src_path.exists():
                print( 'Training data src directory does not exist.')
                return
                
            if not training_data_dst_path.exists():
                print( 'Training data dst directory does not exist.')
                return
                
            if not model_path.exists():
                model_path.mkdir(exist_ok=True)
   
            model = models.import_model(model_name)(
                        model_path, 
                        training_data_src_path=training_data_src_path, 
                        training_data_dst_path=training_data_dst_path, 
                        debug=debug,
                        **in_options)
            
            is_reached_goal = model.is_reached_epoch_goal()
            
            def model_save():
                if not debug and not is_reached_goal:
                    model.save()
            
            def send_preview():
                if not debug:                        
                    previews = model.get_previews()                
                    output_queue.put ( {'op':'show', 'previews': previews, 'epoch':model.get_epoch(), 'loss_history': model.get_loss_history().copy() } )
                else:
                    previews = [( 'debug, press update for new', model.debug_one_epoch())]
                    output_queue.put ( {'op':'show', 'previews': previews} )
            
            
            if model.is_first_run():
                model_save()
                
            if model.get_target_epoch() != 0:
                if is_reached_goal:
                    print ('Model already trained to target epoch. You can use preview.')
                else:
                    print('Starting. Target epoch: %d. Press "Enter" to stop training and save model.' % ( model.get_target_epoch()  ) )
            else: 
                print('Starting. Press "Enter" to stop training and save model.')
 
            last_save_time = time.time()
            for i in itertools.count(0,1):
                if not debug:
                    if not is_reached_goal:
                        loss_string = model.train_one_epoch()     

                        print (loss_string, end='\r')
                        if model.get_target_epoch() != 0 and model.is_reached_epoch_goal():
                            print ('Reached target epoch.')
                            model_save()
                            is_reached_goal = True
                            print ('You can use preview now.')

                if not is_reached_goal and (time.time() - last_save_time) >= save_interval_min*60:
                    last_save_time = time.time() 
                    model_save()
                    send_preview()
                    
                if i==0:
                    if is_reached_goal:
                        model.pass_one_epoch()    
                    send_preview()
                    
                if debug:
                    time.sleep(0.005)
                    
                while not input_queue.empty():
                    input = input_queue.get()
                    op = input['op']
                    if op == 'save':
                        model_save()
                    elif op == 'preview':                    
                        if is_reached_goal:
                            model.pass_one_epoch()                    
                        send_preview()
                    elif op == 'close':
                        model_save()
                        i = -1
                        break
                        
                if i == -1:
                    break
                    
                

            model.finalize()
                
        except Exception as e:
            print ('Error: %s' % (str(e)))
            traceback.print_exc()
        break
    output_queue.put ( {'op':'close'} )
Example #14
def trainerThread(s2c, c2s, e, args, device_args):
    while True:
        try:
            start_time = time.time()

            training_data_src_path = Path(args.get("training_data_src_dir", ""))
            training_data_dst_path = Path(args.get("training_data_dst_dir", ""))

            pretraining_data_path = args.get("pretraining_data_dir", "")
            pretraining_data_path = (
                Path(pretraining_data_path)
                if pretraining_data_path is not None
                else None
            )

            model_path = Path(args.get("model_path", ""))
            model_name = args.get("model_name", "")
            save_interval_min = 15
            debug = args.get("debug", "")
            execute_programs = args.get("execute_programs", [])

            if not training_data_src_path.exists():
                io.log_err("Training data src directory does not exist.")
                break

            if not training_data_dst_path.exists():
                io.log_err("Training data dst directory does not exist.")
                break

            if not model_path.exists():
                model_path.mkdir(exist_ok=True)

            model = models.import_model(model_name)(
                model_path,
                training_data_src_path=training_data_src_path,
                training_data_dst_path=training_data_dst_path,
                pretraining_data_path=pretraining_data_path,
                debug=debug,
                device_args=device_args,
            )

            is_reached_goal = model.is_reached_iter_goal()

            shared_state = {"after_save": False}
            loss_string = ""
            save_iter = model.get_iter()

            def model_save():
                if not debug and not is_reached_goal:
                    io.log_info("Saving....", end="\r")
                    model.save()
                    shared_state["after_save"] = True

            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    c2s.put(
                        {
                            "op": "show",
                            "previews": previews,
                            "iter": model.get_iter(),
                            "loss_history": model.get_loss_history().copy(),
                        }
                    )
                else:
                    previews = [("debug, press update for new", model.debug_one_iter())]
                    c2s.put({"op": "show", "previews": previews})
                e.set()  # Set the GUI Thread as Ready

            if model.is_first_run():
                model_save()

            if model.get_target_iter() != 0:
                if is_reached_goal:
                    io.log_info(
                        "Model already trained to target iteration. You can use preview."
                    )
                else:
                    io.log_info(
                        'Starting. Target iteration: %d. Press "Enter" to stop training and save model.'
                        % (model.get_target_iter())
                    )
            else:
                io.log_info('Starting. Press "Enter" to stop training and save model.')

            last_save_time = time.time()

            execute_programs = [[x[0], x[1], time.time()] for x in execute_programs]

            for i in itertools.count(0, 1):
                if not debug:
                    cur_time = time.time()

                    for x in execute_programs:
                        prog_time, prog, last_time = x
                        exec_prog = False
                        if prog_time > 0 and (cur_time - start_time) >= prog_time:
                            x[0] = 0
                            exec_prog = True
                        elif prog_time < 0 and (cur_time - last_time) >= -prog_time:
                            x[2] = cur_time
                            exec_prog = True

                        if exec_prog:
                            try:
                                exec(prog)
                            except Exception as e:
                                print("Unable to execute program: %s" % (prog))

                    if not is_reached_goal:
                        iter, iter_time = model.train_one_iter()

                        loss_history = model.get_loss_history()
                        time_str = time.strftime("[%H:%M:%S]")
                        if iter_time >= 10:
                            loss_string = "{0}[#{1:06d}][{2:.5s}s]".format(
                                time_str, iter, "{:0.4f}".format(iter_time)
                            )
                        else:
                            loss_string = "{0}[#{1:06d}][{2:04d}ms]".format(
                                time_str, iter, int(iter_time * 1000)
                            )

                        if shared_state["after_save"]:
                            shared_state["after_save"] = False
                            last_save_time = (
                                time.time()
                            )  # upd last_save_time only after save+one_iter, because plaidML rebuilds programs after save https://github.com/plaidml/plaidml/issues/274

                            mean_loss = np.mean(
                                [
                                    np.array(loss_history[i])
                                    for i in range(save_iter, iter)
                                ],
                                axis=0,
                            )

                            for loss_value in mean_loss:
                                loss_string += "[%.4f]" % (loss_value)

                            io.log_info(loss_string)

                            save_iter = iter
                        else:
                            for loss_value in loss_history[-1]:
                                loss_string += "[%.4f]" % (loss_value)

                            if io.is_colab():
                                io.log_info("\r" + loss_string, end="")
                            else:
                                io.log_info(loss_string, end="\r")

                        if (
                            model.get_target_iter() != 0
                            and model.is_reached_iter_goal()
                        ):
                            io.log_info("Reached target iteration.")
                            model_save()
                            is_reached_goal = True
                            io.log_info("You can use preview now.")
                            break

                if (
                    not is_reached_goal
                    and (time.time() - last_save_time) >= save_interval_min * 60
                ):
                    model_save()
                    send_preview()

                if i == 0:
                    if is_reached_goal:
                        model.pass_one_iter()
                    send_preview()

                if debug:
                    time.sleep(0.005)

                while not s2c.empty():
                    input = s2c.get()
                    op = input["op"]
                    if op == "save":
                        model_save()
                    elif op == "preview":
                        if is_reached_goal:
                            model.pass_one_iter()
                        send_preview()
                    elif op == "close":
                        model_save()
                        i = -1
                        break

                if i == -1:
                    break

            model.finalize()

        except Exception as e:
            print("Error: %s" % (str(e)))
            traceback.print_exc()
        break
    c2s.put({"op": "close"})
Example #15
def trainerThread(s2c, c2s, e, args, device_args):
    while True:
        try:
            start_time = time.time()

            training_data_src_path = Path(args.get('training_data_src_dir',
                                                   ''))
            training_data_dst_path = Path(args.get('training_data_dst_dir',
                                                   ''))

            pretraining_data_path = args.get('pretraining_data_dir', '')
            pretraining_data_path = Path(
                pretraining_data_path
            ) if pretraining_data_path is not None else None

            model_path = Path(args.get('model_path', ''))
            model_name = args.get('model_name', '')
            save_interval_min = 5
            target_loss = args.get("target_loss", 0)
            debug = args.get('debug', '')
            execute_programs = args.get('execute_programs', [])

            if not training_data_src_path.exists():
                io.log_err('Training data src directory does not exist.')
                break

            if not training_data_dst_path.exists():
                io.log_err('Training data dst directory does not exist.')
                break

            if not model_path.exists():
                model_path.mkdir(exist_ok=True)

            model = models.import_model(model_name)(
                model_path,
                training_data_src_path=training_data_src_path,
                training_data_dst_path=training_data_dst_path,
                pretraining_data_path=pretraining_data_path,
                debug=debug,
                device_args=device_args)

            is_reached_goal = model.is_reached_iter_goal()

            shared_state = {'after_save': False}
            loss_string = ""
            save_iter = model.get_iter()

            def model_save():
                if not debug and not is_reached_goal:
                    io.log_info("Saving....", end='\r')
                    model.save()
                    backup()
                    shared_state['after_save'] = True

            def backup():
                import F
                if model.is_first_run():
                    return
                has_backup = F.has_backup(model_name, model_path)
                io.log_info("Backup....", end='\r')
                loss_src_mean, loss_dst_mean = np.mean([
                    np.array(loss_history[i]) for i in range(save_iter, iter)
                ],
                                                       axis=0)
                loss_src, loss_dst = loss_history[-1]
                if has_backup and (iter > 20000 and loss_src_mean > 1
                                   or loss_dst_mean > 1 or loss_src > 1
                                   or loss_dst > 1):
                    if model_name == "SAE" and model.options['archi'] == 'df':
                        F.restore_model(model_name, model_path)
                        weights_to_load = [
                            [model.encoder, 'encoder.h5'],
                            [model.decoder_src, 'decoder_src.h5'],
                            [model.decoder_dst, 'decoder_dst.h5'],
                            [model.decoder_srcm, 'decoder_srcm.h5'],
                            [model.decoder_dstm, 'decoder_dstm.h5']
                        ]
                        model.load_weights_safe(weights_to_load)
                        io.log_info("Crash And Try Restore....")
                if loss_src_mean <= 1 and loss_dst_mean <= 1 and loss_src <= 1 and loss_dst <= 1:
                    F.backup_model_move(model_name, model_path)
                    F.backup_model(model_name, model_path)

            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    c2s.put({
                        'op': 'show',
                        'previews': previews,
                        'iter': model.get_iter(),
                        'loss_history': model.get_loss_history().copy()
                    })
                else:
                    previews = [('debug, press update for new',
                                 model.debug_one_iter())]
                    c2s.put({'op': 'show', 'previews': previews})
                e.set()  #Set the GUI Thread as Ready

            if model.is_first_run():
                model_save()

            if model.get_target_iter() != 0:
                if is_reached_goal:
                    io.log_info(
                        'Model already trained to target iteration. You can use preview.'
                    )
                else:
                    io.log_info(
                        'Starting. Target iteration: %d. Press "Enter" to stop training and save model.'
                        % (model.get_target_iter()))
            else:
                io.log_info(
                    'Starting. Press "Enter" to stop training and save model.')

            last_save_time = time.time()

            execute_programs = [[x[0], x[1], time.time()]
                                for x in execute_programs]

            for i in itertools.count(0, 1):
                if not debug:
                    cur_time = time.time()

                    for x in execute_programs:
                        prog_time, prog, last_time = x
                        exec_prog = False
                        if prog_time > 0 and (cur_time -
                                              start_time) >= prog_time:
                            x[0] = 0
                            exec_prog = True
                        elif prog_time < 0 and (cur_time -
                                                last_time) >= -prog_time:
                            x[2] = cur_time
                            exec_prog = True

                        if exec_prog:
                            try:
                                exec(prog)
                            except Exception as e:
                                print("Unable to execute program: %s" % (prog))

                    if not is_reached_goal:
                        iter, iter_time = model.train_one_iter()

                        loss_history = model.get_loss_history()
                        time_str = time.strftime("[%H:%M:%S]")
                        if iter_time >= 10:
                            loss_string = "{0}[#{1:06d}][{2:.5s}s]".format(
                                time_str, iter, '{:0.4f}'.format(iter_time))
                        else:
                            loss_string = "{0}[#{1:06d}][{2:04d}ms]".format(
                                time_str, iter, int(iter_time * 1000))

                        if shared_state['after_save']:
                            shared_state['after_save'] = False
                            last_save_time = time.time(
                            )  #upd last_save_time only after save+one_iter, because plaidML rebuilds programs after save https://github.com/plaidml/plaidml/issues/274

                            mean_loss = np.mean([
                                np.array(loss_history[i])
                                for i in range(save_iter, iter)
                            ],
                                                axis=0)

                            for loss_value in mean_loss:
                                loss_string += "[%.4f]" % (loss_value)

                            io.log_info(loss_string)

                            save_iter = iter

                            if mean_loss[0] <= target_loss and mean_loss[
                                    1] <= target_loss:
                                is_reached_goal = True
                                break
                        else:
                            for loss_value in loss_history[-1]:
                                loss_string += "[%.4f]" % (loss_value)

                            if io.is_colab():
                                io.log_info('\r' + loss_string, end='')
                            else:
                                io.log_info(loss_string, end='\r')

                        if model.get_target_iter(
                        ) != 0 and model.is_reached_iter_goal():
                            io.log_info('Reached target iteration.')
                            model_save()
                            is_reached_goal = True
                            io.log_info('You can use preview now.')

                if not is_reached_goal and (time.time() - last_save_time
                                            ) >= save_interval_min * 60:
                    model_save()
                    send_preview()

                if i == 0:
                    if is_reached_goal:
                        model.pass_one_iter()
                    send_preview()

                if debug:
                    time.sleep(0.005)

                while not s2c.empty():
                    input = s2c.get()
                    op = input['op']
                    if op == 'save':
                        model_save()
                    elif op == 'preview':
                        if is_reached_goal:
                            model.pass_one_iter()
                        send_preview()
                    elif op == 'close':
                        model_save()
                        i = -1
                        break

                if i == -1:
                    break

            model.finalize()

        except Exception as e:
            print('Error: %s' % (str(e)))
            traceback.print_exc()
        break
    c2s.put({'op': 'close'})
Example #16
def main (args, device_args):
    io.log_info ("Running converter.\r\n")

    aligned_dir = args.get('aligned_dir', None)
    avaperator_aligned_dir = args.get('avaperator_aligned_dir', None)

    try:
        input_path = Path(args['input_dir'])
        output_path = Path(args['output_dir'])
        model_path = Path(args['model_dir'])

        if not input_path.exists():
            io.log_err('Input directory not found. Please ensure it exists.')
            return

        if output_path.exists():
            for filename in Path_utils.get_image_paths(output_path):
                Path(filename).unlink()
        else:
            output_path.mkdir(parents=True, exist_ok=True)

        if not model_path.exists():
            io.log_err('Model directory not found. Please ensure it exists.')
            return

        is_interactive = io.input_bool ("Use interactive converter? (y/n skip:y) : ", True) if not io.is_colab() else False

        import models
        model = models.import_model( args['model_name'] )(model_path, device_args=device_args)

        cfg = model.get_ConverterConfig()

        if not is_interactive:
            cfg.ask_settings()

        input_path_image_paths = Path_utils.get_image_paths(input_path)

        if cfg.type == ConverterConfig.TYPE_MASKED:
            if aligned_dir is None:
                io.log_err('Aligned directory not found. Please ensure it exists.')
                return

            aligned_path = Path(aligned_dir)
            if not aligned_path.exists():
                io.log_err('Aligned directory not found. Please ensure it exists.')
                return

            alignments = {}
            multiple_faces_detected = False
            aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
            for filepath in io.progress_bar_generator(aligned_path_image_paths, "Collecting alignments"):
                filepath = Path(filepath)

                if filepath.suffix == '.png':
                    dflimg = DFLPNG.load( str(filepath) )
                elif filepath.suffix == '.jpg':
                    dflimg = DFLJPG.load ( str(filepath) )
                else:
                    dflimg = None

                if dflimg is None:
                    io.log_err ("%s is not a dfl image file" % (filepath.name) )
                    continue

                source_filename_stem = Path( dflimg.get_source_filename() ).stem
                if source_filename_stem not in alignments.keys():
                    alignments[ source_filename_stem ] = []

                alignments_ar = alignments[ source_filename_stem ]
                alignments_ar.append (dflimg.get_source_landmarks())
                if len(alignments_ar) > 1:
                    multiple_faces_detected = True

            if multiple_faces_detected:
                io.log_info ("Warning: multiple faces detected. Strongly recommended to process them separately.")

            frames = [ ConvertSubprocessor.Frame( frame_info=FrameInfo(filename=p, landmarks_list=alignments.get(Path(p).stem, None))) for p in input_path_image_paths ]

            if multiple_faces_detected:
                io.log_info ("Warning: multiple faces detected. Motion blur will not be used.")
            else:
                s = 256
                local_pts = [ (s//2-1, s//2-1), (s//2-1,0) ] #center+up
                frames_len = len(frames)
                for i in io.progress_bar_generator( range(len(frames)) , "Computing motion vectors"):
                    fi_prev = frames[max(0, i-1)].frame_info
                    fi      = frames[i].frame_info
                    fi_next = frames[min(i+1, frames_len-1)].frame_info
                    if len(fi_prev.landmarks_list) == 0 or \
                       len(fi.landmarks_list) == 0 or \
                       len(fi_next.landmarks_list) == 0:
                            continue

                    mat_prev = LandmarksProcessor.get_transform_mat ( fi_prev.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat      = LandmarksProcessor.get_transform_mat ( fi.landmarks_list[0]     , s, face_type=FaceType.FULL)
                    mat_next = LandmarksProcessor.get_transform_mat ( fi_next.landmarks_list[0], s, face_type=FaceType.FULL)

                    pts_prev = LandmarksProcessor.transform_points (local_pts, mat_prev, True)
                    pts      = LandmarksProcessor.transform_points (local_pts, mat, True)
                    pts_next = LandmarksProcessor.transform_points (local_pts, mat_next, True)

                    prev_vector = pts[0]-pts_prev[0]
                    next_vector = pts_next[0]-pts[0]

                    motion_vector = pts_next[0] - pts_prev[0]
                    fi.motion_power = npla.norm(motion_vector)

                    motion_vector = motion_vector / fi.motion_power if fi.motion_power != 0 else np.array([0,0],dtype=np.float32)

                    fi.motion_deg = -math.atan2(motion_vector[1],motion_vector[0])*180 / math.pi


        elif cfg.type == ConverterConfig.TYPE_FACE_AVATAR:
            filesdata = []
            for filepath in io.progress_bar_generator(input_path_image_paths, "Collecting info"):
                filepath = Path(filepath)

                if filepath.suffix == '.png':
                    dflimg = DFLPNG.load( str(filepath) )
                elif filepath.suffix == '.jpg':
                    dflimg = DFLJPG.load ( str(filepath) )
                else:
                    dflimg = None

                if dflimg is None:
                    io.log_err ("%s is not a dfl image file" % (filepath.name) )
                    continue
                filesdata += [ ( FrameInfo(filename=str(filepath), landmarks_list=[dflimg.get_landmarks()] ), dflimg.get_source_filename() ) ]

            filesdata = sorted(filesdata, key=operator.itemgetter(1)) #sort by source filename
            frames = []
            filesdata_len = len(filesdata)
            for i in range(len(filesdata)):
                frame_info = filesdata[i][0]

                prev_temporal_frame_infos = []
                next_temporal_frame_infos = []

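                # Gather a window of cfg.temporal_face_count frames before and after the current frame.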
                for t in range (cfg.temporal_face_count):
                    prev_frame_info = filesdata[ max(i -t, 0) ][0]
                    next_frame_info = filesdata[ min(i +t, filesdata_len-1 )][0]

                    prev_temporal_frame_infos.insert (0, prev_frame_info )
                    next_temporal_frame_infos.append (   next_frame_info )

                frames.append ( ConvertSubprocessor.Frame(prev_temporal_frame_infos=prev_temporal_frame_infos,
                                                          frame_info=frame_info,
                                                          next_temporal_frame_infos=next_temporal_frame_infos) )

        if len(frames) == 0:
            io.log_info ("No frames to convert in input_dir.")
        else:
            ConvertSubprocessor (
                        is_interactive         = is_interactive,
                        converter_config       = cfg,
                        frames                 = frames,
                        output_path            = output_path,
                    ).run()

        model.finalize()

    except Exception as e:
        print ( 'Error: %s' % (str(e)))
        traceback.print_exc()
Example #17
def main(args, device_args):
    io.log_info("Running converter.\r\n")

    aligned_dir = args.get('aligned_dir', None)
    avaperator_aligned_dir = args.get('avaperator_aligned_dir', None)

    try:
        input_path = Path(args['input_dir'])
        output_path = Path(args['output_dir'])
        model_path = Path(args['model_dir'])

        if not input_path.exists():
            io.log_err('Input directory not found. Please ensure it exists.')
            return

        if output_path.exists():
            for filename in Path_utils.get_image_paths(output_path):
                Path(filename).unlink()
        else:
            output_path.mkdir(parents=True, exist_ok=True)

        if not model_path.exists():
            io.log_err('Model directory not found. Please ensure it exists.')
            return

        import models
        model = models.import_model(args['model_name'])(
            model_path, device_args=device_args)
        converter = model.get_converter()

        input_path_image_paths = Path_utils.get_image_paths(input_path)
        alignments = None
        avatar_image_paths = None
        if converter.type == Converter.TYPE_FACE or converter.type == Converter.TYPE_FACE_AVATAR:
            if aligned_dir is None:
                io.log_err(
                    'Aligned directory not found. Please ensure it exists.')
                return

            aligned_path = Path(aligned_dir)
            if not aligned_path.exists():
                io.log_err(
                    'Aligned directory not found. Please ensure it exists.')
                return

            alignments = {}

            aligned_path_image_paths = Path_utils.get_image_paths(aligned_path)
            for filepath in io.progress_bar_generator(aligned_path_image_paths,
                                                      "Collecting alignments"):
                filepath = Path(filepath)

                if filepath.suffix == '.png':
                    dflimg = DFLPNG.load(str(filepath))
                elif filepath.suffix == '.jpg':
                    dflimg = DFLJPG.load(str(filepath))
                else:
                    dflimg = None

                if dflimg is None:
                    io.log_err("%s is not a dfl image file" % (filepath.name))
                    continue

                source_filename_stem = Path(dflimg.get_source_filename()).stem
                if source_filename_stem not in alignments.keys():
                    alignments[source_filename_stem] = []

                alignments[source_filename_stem].append(
                    dflimg.get_source_landmarks())

        if converter.type == Converter.TYPE_FACE_AVATAR:
            if avaperator_aligned_dir is None:
                io.log_err(
                    'Avatar operator aligned directory not found. Please ensure it exists.'
                )
                return

            avaperator_aligned_path = Path(avaperator_aligned_dir)
            if not avaperator_aligned_path.exists():
                io.log_err(
                    'Avatar operator aligned directory not found. Please ensure it exists.'
                )
                return

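            # Sort the avatar operator faces by their source filename so they follow the input frame order.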
            avatar_image_paths = []
            for filename in io.progress_bar_generator(
                    Path_utils.get_image_paths(avaperator_aligned_path),
                    "Sorting avaperator faces"):
                filepath = Path(filename)
                if filepath.suffix == '.png':
                    dflimg = DFLPNG.load(str(filepath))
                elif filepath.suffix == '.jpg':
                    dflimg = DFLJPG.load(str(filepath))
                else:
                    dflimg = None

                if dflimg is None:
                    io.log_err("Fatal error: %s is not a dfl image file" %
                               (filepath.name))
                    return

                avatar_image_paths += [(filename, dflimg.get_source_filename())
                                       ]
            avatar_image_paths = [
                p[0]
                for p in sorted(avatar_image_paths, key=operator.itemgetter(1))
            ]

            if len(input_path_image_paths) < len(avatar_image_paths):
                io.log_err(
                    "Input faces count must be >= avatar operator faces count."
                )
                return

        files_processed, faces_processed = ConvertSubprocessor(
            converter=converter,
            input_path_image_paths=input_path_image_paths,
            output_path=output_path,
            alignments=alignments,
            avatar_image_paths=avatar_image_paths,
            debug=args.get('debug', False)).run()

        model.finalize()

    except Exception as e:
        print('Error: %s' % (str(e)))
        traceback.print_exc()
Example #18
def evaluate(print_grid=False):
    with tf.device('/gpu:0'):  # run on specific device
        conflict_grids, pred, gt, mask = models.import_model(
            num_timesteps, input_size, batch_size)

    dataset = data_loader.read_datasets(data_file, dataset_type='test')

    saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver.restore(sess, model_path)

        all_pred, all_gt = [], []
        for i in range(updates_per_epoch):
            conflict_grids_batch, gt_batch, mask_batch = \
                                    dataset.next_batch(batch_size)
            pred_value = sess.run(
                [pred], {
                    conflict_grids: conflict_grids_batch,
                    gt: gt_batch,
                    mask: mask_batch
                })

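            # Zero out predictions outside the mask and drop masked-out cells before computing metrics.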
            pred_value = pred_value * mask_batch
            to_remove_idxs = np.where(mask_batch.flatten() < 1)
            pred_value = np.delete(pred_value.flatten(), to_remove_idxs)
            gt_batch = np.delete(gt_batch.flatten(), to_remove_idxs)
            assert (len(pred_value) == len(gt_batch))

            for k in range(len(pred_value)):
                all_pred.append(pred_value[k])
                all_gt.append(gt_batch[k])

            if print_grid:
                np.set_printoptions(precision=1, linewidth=150, suppress=True)
                print('-' * 80)
                print(np.squeeze(pred_value))
                print(np.squeeze(gt_batch))

        assert (len(all_pred) == len(all_gt))

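        # Fraction of cells where the 0.5-thresholded prediction matches the binary ground truth.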
        num_align = 0
        for i in range(len(all_pred)):
            if all_gt[i] > 0:
                if all_pred[i] > 0.5: num_align += 1
            elif all_gt[i] < 1:
                if all_pred[i] <= 0.5: num_align += 1
        print("Aligned:", float(num_align) / len(all_pred))

        threshold = 0.5
        precision_num, precision_denom = 0.0, 0.0
        for i in range(len(all_pred)):
            if all_gt[i] == 1:
                if all_pred[i] >= threshold:
                    precision_num += 1
                    precision_denom += 1
            else:
                if all_pred[i] >= threshold: precision_denom += 1

        recall_num, recall_denom = 0.0, 0.0
        for i in range(len(all_pred)):
            if all_gt[i] == 1:
                if all_pred[i] >= threshold:
                    recall_num += 1
                    recall_denom += 1
                else:
                    recall_denom += 1

        print("Precision", float(precision_num) / precision_denom)
        print("Recall", float(recall_num) / recall_denom)
Example #19
def main(model_class_name, saved_models_path):
    model = models.import_model(model_class_name)(
                        is_exporting=True,
                        saved_models_path=saved_models_path,
                        cpu_only=True)
    model.export_dfm()

def evaluate(print_grid=False):
    data_paths = [conflict_data_file, poverty_grid_file, poverty_mask_file]
    dataset, conflict_mask, poverty_grid, poverty_mask = data_loader.read_datasets(data_paths, dataset_type='test')
    with tf.device('/gpu:0'):  # run on specific device
        conflict_grids, pov_grid, pred, gt = models.import_model(num_timesteps,
                                                                 input_size,
                                                                 poverty_grid.shape)

    saver = tf.train.Saver()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        saver.restore(sess, model_path)

        all_pred, all_gt = [], []
        for i in range(updates_per_epoch):
            conflict_grids_batch, gt_batch = \
                                    dataset.next_batch(batch_size)
            pred_value = sess.run([pred],
                                  {conflict_grids: conflict_grids_batch,
                                   pov_grid: poverty_grid,
                                   gt: gt_batch})

            mask = conflict_mask * poverty_mask
            pred_value = pred_value * mask
            to_remove_idxs = np.where(mask.flatten() < 1)
            pred_value = np.delete(pred_value.flatten(), to_remove_idxs)
            gt_batch = np.delete(gt_batch.flatten(), to_remove_idxs)
            assert len(pred_value) == len(gt_batch)

            for k in range(len(pred_value)):
                all_pred.append(pred_value[k])
                all_gt.append(gt_batch[k])

            if print_grid:
                np.set_printoptions(precision=1, linewidth=150, suppress=True)
                print('-' * 80)
                print(np.squeeze(pred_value))
                print(np.squeeze(gt_batch))

        assert len(all_pred) == len(all_gt)

        num_align = 0
        for i in range(len(all_pred)):
            if all_gt[i] > 0:
                if all_pred[i] > 0.5: num_align += 1
            elif all_gt[i] < 1:
                if all_pred[i] <= 0.5: num_align += 1
        print("Aligned:", float(num_align) / len(all_pred))

        threshold = 0.5
        precision_num, precision_denom = 0.0, 0.0
        for i in range(len(all_pred)):
            if all_gt[i] == 1:
                if all_pred[i] >= threshold:
                    precision_num += 1
                    precision_denom += 1
            else:
                if all_pred[i] >= threshold: precision_denom += 1

        recall_num, recall_denom = 0.0, 0.0
        for i in range(len(all_pred)):
            if all_gt[i] == 1:
                if all_pred[i] >= threshold:
                    recall_num += 1
                    recall_denom += 1
                else:
                    recall_denom += 1

        precision, recall, thresholds = precision_recall_curve(all_gt, all_pred)
        print("Precision", float(precision_num) / precision_denom)
        print("Recall", float(recall_num) / recall_denom)
Example #21
def main(model_class_name=None,
         saved_models_path=None,
         training_data_src_path=None,
         force_model_name=None,
         input_path=None,
         output_path=None,
         aligned_path=None,
         force_gpu_idxs=None,
         cpu_only=None):
    io.log_info("Running merger.\r\n")

    try:
        if not input_path.exists():
            io.log_err('Input directory not found. Please ensure it exists.')
            return

        if not output_path.exists():
            output_path.mkdir(parents=True, exist_ok=True)

        if not saved_models_path.exists():
            io.log_err('Model directory not found. Please ensure it exists.')
            return

        is_interactive = io.input_bool("Use interactive merger?",
                                       True) if not io.is_colab() else False

        import models
        model = models.import_model(model_class_name)(
            is_training=False,
            saved_models_path=saved_models_path,
            training_data_src_path=training_data_src_path,
            force_gpu_idxs=force_gpu_idxs,
            cpu_only=cpu_only)
        merger_session_filepath = model.get_strpath_storage_for_file(
            'merger_session.dat')
        predictor_func, predictor_input_shape, cfg = model.get_MergerConfig()

        if not is_interactive:
            cfg.ask_settings()

        input_path_image_paths = pathex.get_image_paths(input_path)

        if cfg.type == MergerConfig.TYPE_MASKED:
            if not aligned_path.exists():
                io.log_err(
                    'Aligned directory not found. Please ensure it exists.')
                return

            packed_samples = None
            try:
                packed_samples = samplelib.PackedFaceset.load(aligned_path)
            except:
                io.log_err(
                    f"Error occurred while loading samplelib.PackedFaceset.load {str(aligned_path)}, {traceback.format_exc()}"
                )

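            # Alignments are read either from a packed faceset (if one loads) or from loose aligned images on disk.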
            if packed_samples is not None:
                io.log_info("Using packed faceset.")

                def generator():
                    for sample in io.progress_bar_generator(
                            packed_samples, "Collecting alignments"):
                        filepath = Path(sample.filename)
                        yield filepath, DFLIMG.load(
                            filepath,
                            loader_func=lambda x: sample.read_raw_file())
            else:

                def generator():
                    for filepath in io.progress_bar_generator(
                            pathex.get_image_paths(aligned_path),
                            "Collecting alignments"):
                        filepath = Path(filepath)
                        yield filepath, DFLIMG.load(filepath)

            alignments = {}
            multiple_faces_detected = False

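            # Group source landmarks by source-frame filename stem; more than one face per frame disables motion blur below.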
            for filepath, dflimg in generator():
                if dflimg is None:
                    io.log_err("%s is not a dfl image file" % (filepath.name))
                    continue

                source_filename = dflimg.get_source_filename()
                if source_filename is None or source_filename == "_":
                    continue

                source_filename = Path(source_filename)
                source_filename_stem = source_filename.stem

                if source_filename_stem not in alignments.keys():
                    alignments[source_filename_stem] = []

                alignments_ar = alignments[source_filename_stem]
                alignments_ar.append(dflimg.get_source_landmarks())
                if len(alignments_ar) > 1:
                    multiple_faces_detected = True

            if multiple_faces_detected:
                io.log_info(
                    "Warning: multiple faces detected. Strongly recommended to process them separately."
                )

            frames = [
                MergeSubprocessor.Frame(frame_info=FrameInfo(
                    filepath=Path(p),
                    landmarks_list=alignments.get(Path(p).stem, None)))
                for p in input_path_image_paths
            ]

            if multiple_faces_detected:
                io.log_info(
                    "Warning: multiple faces detected. Motion blur will not be used."
                )
            else:
                s = 256
                local_pts = [(s // 2 - 1, s // 2 - 1),
                             (s // 2 - 1, 0)]  #center+up
                frames_len = len(frames)
                for i in io.progress_bar_generator(range(len(frames)),
                                                   "Computing motion vectors"):
                    fi_prev = frames[max(0, i - 1)].frame_info
                    fi = frames[i].frame_info
                    fi_next = frames[min(i + 1, frames_len - 1)].frame_info
                    if len(fi_prev.landmarks_list) == 0 or \
                       len(fi.landmarks_list) == 0 or \
                       len(fi_next.landmarks_list) == 0:
                        continue

                    mat_prev = LandmarksProcessor.get_transform_mat(
                        fi_prev.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat = LandmarksProcessor.get_transform_mat(
                        fi.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat_next = LandmarksProcessor.get_transform_mat(
                        fi_next.landmarks_list[0], s, face_type=FaceType.FULL)

                    pts_prev = LandmarksProcessor.transform_points(
                        local_pts, mat_prev, True)
                    pts = LandmarksProcessor.transform_points(
                        local_pts, mat, True)
                    pts_next = LandmarksProcessor.transform_points(
                        local_pts, mat_next, True)

                    prev_vector = pts[0] - pts_prev[0]
                    next_vector = pts_next[0] - pts[0]

                    motion_vector = pts_next[0] - pts_prev[0]
                    fi.motion_power = npla.norm(motion_vector)

                    motion_vector = motion_vector / fi.motion_power if fi.motion_power != 0 else np.array(
                        [0, 0], dtype=np.float32)

                    fi.motion_deg = -math.atan2(
                        motion_vector[1], motion_vector[0]) * 180 / math.pi

        elif cfg.type == MergerConfig.TYPE_FACE_AVATAR:
            filesdata = []
            for filepath in io.progress_bar_generator(input_path_image_paths,
                                                      "Collecting info"):
                filepath = Path(filepath)

                dflimg = DFLIMG.load(filepath)
                if dflimg is None:
                    io.log_err("%s is not a dfl image file" % (filepath.name))
                    continue
                filesdata += [
                    (FrameInfo(filepath=filepath,
                               landmarks_list=[dflimg.get_landmarks()]),
                     dflimg.get_source_filename())
                ]

            filesdata = sorted(
                filesdata,
                key=operator.itemgetter(1))  #sort by source_filename
            frames = []
            filesdata_len = len(filesdata)
            for i in range(len(filesdata)):
                frame_info = filesdata[i][0]

                prev_temporal_frame_infos = []
                next_temporal_frame_infos = []

                for t in range(cfg.temporal_face_count):
                    prev_frame_info = filesdata[max(i - t, 0)][0]
                    next_frame_info = filesdata[min(i + t,
                                                    filesdata_len - 1)][0]

                    prev_temporal_frame_infos.insert(0, prev_frame_info)
                    next_temporal_frame_infos.append(next_frame_info)

                frames.append(
                    MergeSubprocessor.Frame(
                        prev_temporal_frame_infos=prev_temporal_frame_infos,
                        frame_info=frame_info,
                        next_temporal_frame_infos=next_temporal_frame_infos))

        if len(frames) == 0:
            io.log_info("No frames to merge in input_dir.")
        else:
            MergeSubprocessor(is_interactive=is_interactive,
                              merger_session_filepath=merger_session_filepath,
                              predictor_func=predictor_func,
                              predictor_input_shape=predictor_input_shape,
                              merger_config=cfg,
                              frames=frames,
                              frames_root_path=input_path,
                              output_path=output_path,
                              model_iter=model.get_iter()).run()

        model.finalize()

    except Exception as e:
        print('Error: %s' % (str(e)))
        traceback.print_exc()
Example #22
def trainerThread(s2c,
                  c2s,
                  e,
                  model_class_name=None,
                  saved_models_path=None,
                  training_data_src_path=None,
                  training_data_dst_path=None,
                  pretraining_data_path=None,
                  pretrained_model_path=None,
                  no_preview=False,
                  force_model_name=None,
                  force_gpu_idxs=None,
                  cpu_only=None,
                  execute_programs=None,
                  debug=False,
                  **kwargs):
    while True:
        try:
            start_time = time.time()

            save_interval_min = 15

            if not training_data_src_path.exists():
                io.log_err('Training data src directory does not exist.')
                break

            if not training_data_dst_path.exists():
                io.log_err('Training data dst directory does not exist.')
                break

            if not saved_models_path.exists():
                saved_models_path.mkdir(exist_ok=True)

            model = models.import_model(model_class_name)(
                is_training=True,
                saved_models_path=saved_models_path,
                training_data_src_path=training_data_src_path,
                training_data_dst_path=training_data_dst_path,
                pretraining_data_path=pretraining_data_path,
                pretrained_model_path=pretrained_model_path,
                no_preview=no_preview,
                force_model_name=force_model_name,
                force_gpu_idxs=force_gpu_idxs,
                cpu_only=cpu_only,
                debug=debug,
            )

            is_reached_goal = model.is_reached_iter_goal()

            shared_state = {'after_save': False}
            loss_string = ""
            save_iter = model.get_iter()

            def model_save():
                if not debug and not is_reached_goal:
                    io.log_info("Saving....", end='\r')
                    model.save()
                    shared_state['after_save'] = True

            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    c2s.put({
                        'op': 'show',
                        'previews': previews,
                        'iter': model.get_iter(),
                        'loss_history': model.get_loss_history().copy()
                    })
                else:
                    previews = [('debug, press update for new',
                                 model.debug_one_iter())]
                    c2s.put({'op': 'show', 'previews': previews})
                e.set()  #Set the GUI Thread as Ready

            if model.get_target_iter() != 0:
                if is_reached_goal:
                    io.log_info(
                        'Model already trained to target iteration. You can use preview.'
                    )
                else:
                    io.log_info(
                        'Starting. Target iteration: %d. Press "Enter" to stop training and save model.'
                        % (model.get_target_iter()))
            else:
                io.log_info(
                    'Starting. Press "Enter" to stop training and save model.')

            last_save_time = time.time()

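            # Each execute_programs entry becomes [trigger_time, program_text, last_run_time].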
            execute_programs = [[x[0], x[1], time.time()]
                                for x in execute_programs]

            for i in itertools.count(0, 1):
                if not debug:
                    cur_time = time.time()

                    for x in execute_programs:
                        prog_time, prog, last_time = x
                        exec_prog = False
                        if prog_time > 0 and (cur_time -
                                              start_time) >= prog_time:
                            x[0] = 0
                            exec_prog = True
                        elif prog_time < 0 and (cur_time -
                                                last_time) >= -prog_time:
                            x[2] = cur_time
                            exec_prog = True

                        if exec_prog:
                            try:
                                exec(prog)
                            except Exception as e:
                                print("Unable to execute program: %s" % (prog))

                    if not is_reached_goal:

                        if model.get_iter() == 0:
                            io.log_info("")
                            io.log_info(
                                "Trying to do the first iteration. If an error occurs, reduce the model parameters."
                            )
                            io.log_info("")

                        iter, iter_time = model.train_one_iter()

                        loss_history = model.get_loss_history()
                        time_str = time.strftime("[%H:%M:%S]")
                        if iter_time >= 10:
                            loss_string = "{0}[#{1:06d}][{2:.5s}s]".format(
                                time_str, iter, '{:0.4f}'.format(iter_time))
                        else:
                            loss_string = "{0}[#{1:06d}][{2:04d}ms]".format(
                                time_str, iter, int(iter_time * 1000))

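                        # After a save, log the mean loss since the previous save; otherwise log the latest iteration's loss.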
                        if shared_state['after_save']:
                            shared_state['after_save'] = False
                            last_save_time = time.time()

                            mean_loss = np.mean([
                                np.array(loss_history[i])
                                for i in range(save_iter, iter)
                            ],
                                                axis=0)

                            for loss_value in mean_loss:
                                loss_string += "[%.4f]" % (loss_value)

                            io.log_info(loss_string)

                            save_iter = iter
                        else:
                            for loss_value in loss_history[-1]:
                                loss_string += "[%.4f]" % (loss_value)

                            if io.is_colab():
                                io.log_info('\r' + loss_string, end='')
                            else:
                                io.log_info(loss_string, end='\r')

                        if model.get_iter() == 1:
                            model_save()

                        if model.get_target_iter() != 0 and model.is_reached_iter_goal():
                            io.log_info('Reached target iteration.')
                            model_save()
                            is_reached_goal = True
                            io.log_info('You can use preview now.')

                if not is_reached_goal and (time.time() - last_save_time
                                            ) >= save_interval_min * 60:
                    model_save()
                    send_preview()

                if i == 0:
                    if is_reached_goal:
                        model.pass_one_iter()
                    send_preview()

                if debug:
                    time.sleep(0.005)

                while not s2c.empty():
                    input = s2c.get()
                    op = input['op']
                    if op == 'save':
                        model_save()
                    elif op == 'preview':
                        if is_reached_goal:
                            model.pass_one_iter()
                        send_preview()
                    elif op == 'close':
                        model_save()
                        i = -1
                        break

                if i == -1:
                    break

            model.finalize()

        except Exception as e:
            print('Error: %s' % (str(e)))
            traceback.print_exc()
        break
    c2s.put({'op': 'close'})
Example #23
def trainerThread(s2c,
                  c2s,
                  e,
                  model_class_name=None,
                  saved_models_path=None,
                  training_data_src_path=None,
                  training_data_dst_path=None,
                  pretraining_data_path=None,
                  pretrained_model_path=None,
                  no_preview=False,
                  force_model_name=None,
                  force_gpu_idxs=None,
                  cpu_only=None,
                  silent_start=False,
                  execute_programs=None,
                  debug=False,
                  tensorboard_dir=None,
                  start_tensorboard=False,
                  **kwargs):
    while True:
        try:
            start_time = time.time()

            save_interval_min = 15
            tensorboard_preview_interval_min = 5

            if not training_data_src_path.exists():
                training_data_src_path.mkdir(exist_ok=True, parents=True)

            if not training_data_dst_path.exists():
                training_data_dst_path.mkdir(exist_ok=True, parents=True)

            if not saved_models_path.exists():
                saved_models_path.mkdir(exist_ok=True, parents=True)

            model = models.import_model(model_class_name)(
                is_training=True,
                saved_models_path=saved_models_path,
                training_data_src_path=training_data_src_path,
                training_data_dst_path=training_data_dst_path,
                pretraining_data_path=pretraining_data_path,
                pretrained_model_path=pretrained_model_path,
                no_preview=no_preview,
                force_model_name=force_model_name,
                force_gpu_idxs=force_gpu_idxs,
                cpu_only=cpu_only,
                silent_start=silent_start,
                debug=debug,
            )

            is_reached_goal = model.is_reached_iter_goal()

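            # Ask the receiving side of the c2s queue to initialize TensorBoard logging for this model.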
            if tensorboard_dir is not None:
                c2s.put({
                    'op': 'tb',
                    'action': 'init',
                    'model_name': model.model_name,
                    'tensorboard_dir': tensorboard_dir,
                    'start_tensorboard': start_tensorboard
                })

            shared_state = {'after_save': False}
            loss_string = ""
            save_iter = model.get_iter()

            def model_save():
                if not debug and not is_reached_goal:
                    io.log_info("Saving....", end='\r')
                    model.save()
                    shared_state['after_save'] = True

            def model_backup():
                if not debug and not is_reached_goal:
                    model.create_backup()

            def log_step(step, step_time, src_loss, dst_loss):
                c2s.put({
                    'op': 'tb',
                    'action': 'step',
                    'step': step,
                    'step_time': step_time,
                    'src_loss': src_loss,
                    'dst_loss': dst_loss
                })

            def log_previews(step, previews, static_previews):
                c2s.put({
                    'op': 'tb',
                    'action': 'preview',
                    'step': step,
                    'previews': previews,
                    'static_previews': static_previews
                })

            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    c2s.put({
                        'op': 'show',
                        'previews': previews,
                        'iter': model.get_iter(),
                        'loss_history': model.get_loss_history().copy()
                    })
                else:
                    previews = [('debug, press update for new',
                                 model.debug_one_iter())]
                    c2s.put({'op': 'show', 'previews': previews})
                e.set()  #Set the GUI Thread as Ready

            if model.get_target_iter() != 0:
                if is_reached_goal:
                    io.log_info(
                        'Model already trained to target iteration. You can use preview.'
                    )
                else:
                    io.log_info(
                        'Starting. Target iteration: %d. Press "Enter" to stop training and save model.'
                        % (model.get_target_iter()))
            else:
                io.log_info(
                    'Starting. Press "Enter" to stop training and save model.')

            last_save_time = time.time()
            last_preview_time = time.time()

            execute_programs = [[x[0], x[1], time.time()]
                                for x in execute_programs]

            for i in itertools.count(0, 1):
                if not debug:
                    cur_time = time.time()

                    for x in execute_programs:
                        prog_time, prog, last_time = x
                        exec_prog = False
                        if prog_time > 0 and (cur_time -
                                              start_time) >= prog_time:
                            x[0] = 0
                            exec_prog = True
                        elif prog_time < 0 and (cur_time -
                                                last_time) >= -prog_time:
                            x[2] = cur_time
                            exec_prog = True

                        if exec_prog:
                            try:
                                exec(prog)
                            except Exception as e:
                                print("Unable to execute program: %s" % (prog))

                    if not is_reached_goal:

                        if model.get_iter() == 0:
                            io.log_info("")
                            io.log_info(
                                "Trying to do the first iteration. If an error occurs, reduce the model parameters."
                            )
                            io.log_info("")

                            if sys.platform[0:3] == 'win':
                                io.log_info("!!!")
                                io.log_info(
                                    "Windows 10 users IMPORTANT notice. You should set this setting in order to work correctly."
                                )
                                io.log_info("https://i.imgur.com/B7cmDCB.jpg")
                                io.log_info("!!!")

                        iter, iter_time = model.train_one_iter()

                        loss_history = model.get_loss_history()
                        time_str = time.strftime("[%H:%M:%S]")
                        if iter_time >= 10:
                            loss_string = "{0}[#{1:06d}][{2:.5s}s]".format(
                                time_str, iter, '{:0.4f}'.format(iter_time))
                        else:
                            loss_string = "{0}[#{1:06d}][{2:04d}ms]".format(
                                time_str, iter, int(iter_time * 1000))

                        if shared_state['after_save']:
                            shared_state['after_save'] = False

                            mean_loss = np.mean(loss_history[save_iter:iter],
                                                axis=0)

                            for loss_value in mean_loss:
                                loss_string += "[%.4f]" % (loss_value)

                            io.log_info(loss_string)

                            save_iter = iter
                        else:
                            for loss_value in loss_history[-1]:
                                loss_string += "[%.4f]" % (loss_value)

                            if io.is_colab():
                                io.log_info('\r' + loss_string, end='')
                            else:
                                io.log_info(loss_string, end='\r')

                        loss_entry = loss_history[-1]
                        log_step(
                            iter, iter_time, loss_entry[0],
                            loss_entry[1] if len(loss_entry) > 1 else None)

                        if model.get_iter() == 1:
                            model_save()

                        if model.get_target_iter() != 0 and model.is_reached_iter_goal():
                            io.log_info('Reached target iteration.')
                            model_save()
                            is_reached_goal = True
                            io.log_info('You can use preview now.')

                if not is_reached_goal and (
                        time.time() - last_preview_time
                ) >= tensorboard_preview_interval_min * 60:
                    last_preview_time += tensorboard_preview_interval_min * 60
                    previews = model.get_previews()
                    static_previews = model.get_static_previews()
                    log_previews(iter, previews, static_previews)

                if not is_reached_goal and (time.time() - last_save_time
                                            ) >= save_interval_min * 60:
                    last_save_time += save_interval_min * 60
                    model_save()
                    send_preview()

                if i == 0:
                    if is_reached_goal:
                        model.pass_one_iter()
                    send_preview()

                if debug:
                    time.sleep(0.005)

                while not s2c.empty():
                    input = s2c.get()
                    op = input['op']
                    if op == 'save':
                        model_save()
                    elif op == 'backup':
                        model_backup()
                    elif op == 'preview':
                        if is_reached_goal:
                            model.pass_one_iter()
                        send_preview()
                    elif op == 'close':
                        model_save()
                        i = -1
                        break

                if i == -1:
                    break

            model.finalize()

        except Exception as e:
            print('Error: %s' % (str(e)))
            traceback.print_exc()
        break
    c2s.put({'op': 'close'})
Example #24
def main(model_class_name=None,
         saved_models_path=None,
         training_data_src_path=None,
         force_model_name=None,
         input_path=None,
         output_path=None,
         output_mask_path=None,
         aligned_path=None,
         force_gpu_idxs=None,
         cpu_only=None):
    io.log_info("Running merger.\r\n")

    try:
        if not input_path.exists():
            io.log_err('Input directory not found. Please ensure it exists.')
            return

        if not output_path.exists():
            output_path.mkdir(parents=True, exist_ok=True)

        if not output_mask_path.exists():
            output_mask_path.mkdir(parents=True, exist_ok=True)

        if not saved_models_path.exists():
            io.log_err('Model directory not found. Please ensure it exists.')
            return

        # Initialize model
        import models
        model = models.import_model(model_class_name)(
            is_training=False,
            saved_models_path=saved_models_path,
            force_gpu_idxs=force_gpu_idxs,
            cpu_only=cpu_only)

        predictor_func, predictor_input_shape, cfg = model.get_MergerConfig()

        # Preparing MP functions
        predictor_func = MPFunc(predictor_func)

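        # XSeg masking and face enhancement are created on demand via MPClassFuncOnDemand; they run on CPU when no GPU device is configured.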
        run_on_cpu = len(nn.getCurrentDeviceConfig().devices) == 0
        xseg_256_extract_func = MPClassFuncOnDemand(
            XSegNet,
            'extract',
            name='XSeg',
            resolution=256,
            weights_file_root=saved_models_path,
            place_model_on_cpu=True,
            run_on_cpu=run_on_cpu)

        face_enhancer_func = MPClassFuncOnDemand(FaceEnhancer,
                                                 'enhance',
                                                 place_model_on_cpu=True,
                                                 run_on_cpu=run_on_cpu)

        is_interactive = io.input_bool("Use interactive merger?",
                                       True) if not io.is_colab() else False

        #         if not is_interactive:
        #             cfg.ask_settings()

        subprocess_count = multiprocessing.cpu_count()
        #         subprocess_count = io.input_int("Number of workers?", max(8, multiprocessing.cpu_count()),
        #                                         valid_range=[1, multiprocessing.cpu_count()], help_message="Specify the number of threads to process. A low value may affect performance. A high value may result in memory error. The value may not be greater than CPU cores." )

        input_path_image_paths = pathex.get_image_paths(input_path)

        if cfg.type == MergerConfig.TYPE_MASKED:
            if not aligned_path.exists():
                io.log_err(
                    'Aligned directory not found. Please ensure it exists.')
                return

            packed_samples = None
            try:
                packed_samples = samplelib.PackedFaceset.load(aligned_path)
            except:
                io.log_err(
                    f"Error occurred while loading samplelib.PackedFaceset.load {str(aligned_path)}, {traceback.format_exc()}"
                )

            if packed_samples is not None:
                io.log_info("Using packed faceset.")

                def generator():
                    for sample in io.progress_bar_generator(
                            packed_samples, "Collecting alignments"):
                        filepath = Path(sample.filename)
                        yield filepath, DFLIMG.load(
                            filepath,
                            loader_func=lambda x: sample.read_raw_file())
            else:

                def generator():
                    for filepath in io.progress_bar_generator(
                            pathex.get_image_paths(aligned_path),
                            "Collecting alignments"):
                        filepath = Path(filepath)
                        yield filepath, DFLIMG.load(filepath)

            alignments = {}
            multiple_faces_detected = False

            for filepath, dflimg in generator():
                if dflimg is None or not dflimg.has_data():
                    io.log_err(f"{filepath.name} is not a dfl image file")
                    continue

                source_filename = dflimg.get_source_filename()
                if source_filename is None:
                    continue

                source_filepath = Path(source_filename)
                source_filename_stem = source_filepath.stem

                if source_filename_stem not in alignments.keys():
                    alignments[source_filename_stem] = []

                alignments_ar = alignments[source_filename_stem]
                alignments_ar.append(
                    (dflimg.get_source_landmarks(), filepath, source_filepath))

                if len(alignments_ar) > 1:
                    multiple_faces_detected = True

            if multiple_faces_detected:
                io.log_info("")
                io.log_info(
                    "Warning: multiple faces detected. Only one alignment file should refer one source file."
                )
                io.log_info("")

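            # Report every alignment that shares a source frame, then keep only the landmarks for each entry.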
            for a_key in list(alignments.keys()):
                a_ar = alignments[a_key]
                if len(a_ar) > 1:
                    for _, filepath, source_filepath in a_ar:
                        io.log_info(
                            f"alignment {filepath.name} refers to {source_filepath.name} "
                        )
                    io.log_info("")

                alignments[a_key] = [a[0] for a in a_ar]

            if multiple_faces_detected:
                io.log_info(
                    "It is strongly recommended to process the faces separately."
                )
                io.log_info(
                    "Use 'recover original filename' to determine the exact duplicates."
                )
                io.log_info("")

            frames = [
                InteractiveMergerSubprocessor.Frame(frame_info=FrameInfo(
                    filepath=Path(p),
                    landmarks_list=alignments.get(Path(p).stem, None)))
                for p in input_path_image_paths
            ]

            if multiple_faces_detected:
                io.log_info(
                    "Warning: multiple faces detected. Motion blur will not be used."
                )
                io.log_info("")
            else:
                s = 256
                local_pts = [(s // 2 - 1, s // 2 - 1),
                             (s // 2 - 1, 0)]  #center+up
                frames_len = len(frames)
                for i in io.progress_bar_generator(range(len(frames)),
                                                   "Computing motion vectors"):
                    fi_prev = frames[max(0, i - 1)].frame_info
                    fi = frames[i].frame_info
                    fi_next = frames[min(i + 1, frames_len - 1)].frame_info
                    if len(fi_prev.landmarks_list) == 0 or \
                       len(fi.landmarks_list) == 0 or \
                       len(fi_next.landmarks_list) == 0:
                        continue

                    mat_prev = LandmarksProcessor.get_transform_mat(
                        fi_prev.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat = LandmarksProcessor.get_transform_mat(
                        fi.landmarks_list[0], s, face_type=FaceType.FULL)
                    mat_next = LandmarksProcessor.get_transform_mat(
                        fi_next.landmarks_list[0], s, face_type=FaceType.FULL)

                    pts_prev = LandmarksProcessor.transform_points(
                        local_pts, mat_prev, True)
                    pts = LandmarksProcessor.transform_points(
                        local_pts, mat, True)
                    pts_next = LandmarksProcessor.transform_points(
                        local_pts, mat_next, True)

                    prev_vector = pts[0] - pts_prev[0]
                    next_vector = pts_next[0] - pts[0]

                    motion_vector = pts_next[0] - pts_prev[0]
                    fi.motion_power = npla.norm(motion_vector)

                    motion_vector = motion_vector / fi.motion_power if fi.motion_power != 0 else np.array(
                        [0, 0], dtype=np.float32)

                    fi.motion_deg = -math.atan2(
                        motion_vector[1], motion_vector[0]) * 180 / math.pi

        if len(frames) == 0:
            io.log_info("No frames to merge in input_dir.")
        else:
            if False:
                pass
            else:
                InteractiveMergerSubprocessor(
                    is_interactive=is_interactive,
                    merger_session_filepath=model.get_strpath_storage_for_file(
                        'merger_session.dat'),
                    predictor_func=predictor_func,
                    predictor_input_shape=predictor_input_shape,
                    face_enhancer_func=face_enhancer_func,
                    xseg_256_extract_func=xseg_256_extract_func,
                    merger_config=cfg,
                    frames=frames,
                    frames_root_path=input_path,
                    output_path=output_path,
                    output_mask_path=output_mask_path,
                    model_iter=model.get_iter(),
                    subprocess_count=subprocess_count,
                ).run()

        model.finalize()

    except Exception as e:
        print(traceback.format_exc())
Example #25
def train():
    data_paths = [
        conflict_data_file, climate_data_file, poverty_grid_file,
        poverty_mask_file
    ]
    dataset, conflict_mask, poverty_grid, poverty_mask = data_loader.read_datasets(
        data_paths)

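    # Build the graph on GPU 0: model outputs, masked loss, and an Adam optimizer.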
    with tf.device('/gpu:0'):  # run on specific device
        conflict_grids, climate_grids, pov_grid, pred, gt = models.import_model(
            num_timesteps, input_size, poverty_grid.shape, input_size)
        loss = get_loss(pred, gt, conflict_mask, poverty_mask)
        optimizer = tf.train.AdamOptimizer(learning_rate, epsilon=1.0)
        train = optimizer.minimize(loss=loss)

    saver = tf.train.Saver()  # defaults to saving all variables

    # logging the loss function
    loss_placeholder = tf.placeholder(tf.float32)
    tf.scalar_summary('train_loss', loss_placeholder)

    merged = tf.merge_all_summaries()

    init = tf.initialize_all_variables()

    with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
        writer = tf.train.SummaryWriter(
            os.path.join(working_directory, 'logs'), sess.graph_def)
        sess.run(init)

        for epoch in range(max_epoch):
            training_loss = 0.0

            widgets = ["epoch #%d|" % epoch, Percentage(), Bar(), ETA()]
            pbar = ProgressBar(updates_per_epoch, widgets=widgets)
            pbar.start()
            for i in range(updates_per_epoch):
                pbar.update(i)
                conflict_grids_batch, gt_batch, climate_grids_batch = \
                    dataset.next_batch(batch_size)
                _, loss_value = sess.run(
                    [train, loss], {
                        conflict_grids: conflict_grids_batch,
                        climate_grids: climate_grids_batch,
                        pov_grid: poverty_grid,
                        gt: gt_batch
                    })
                training_loss += np.sum(loss_value)

            training_loss = training_loss / (updates_per_epoch * batch_size)
            print("Loss %f" % training_loss)

            # save model
            if epoch % save_frequency == 0:
                checkpoints_folder = os.path.join(working_directory,
                                                  'checkpoints')
                if not os.path.exists(checkpoints_folder):
                    os.makedirs(checkpoints_folder)
                saver.save(sess,
                           os.path.join(checkpoints_folder, 'model.ckpt'),
                           global_step=epoch)

                # save summaries
                summary_str = sess.run(merged,
                                       feed_dict={
                                           conflict_grids:
                                           conflict_grids_batch,
                                           climate_grids: climate_grids_batch,
                                           gt: gt_batch,
                                           pov_grid: poverty_grid,
                                           loss_placeholder: training_loss
                                       })
                writer.add_summary(summary_str, global_step=epoch)

        writer.close()
Example #26
def trainerThread(s2c, c2s, args, device_args):
    while True:
        try:
            start_time = time.time()

            training_data_src_path = Path(args.get('training_data_src_dir',
                                                   ''))
            training_data_dst_path = Path(args.get('training_data_dst_dir',
                                                   ''))
            model_path = Path(args.get('model_path', ''))
            model_name = args.get('model_name', '')
            save_interval_min = 15
            debug = args.get('debug', '')
            execute_programs = args.get('execute_programs', [])

            if not training_data_src_path.exists():
                io.log_err('Training data src directory does not exist.')
                break

            if not training_data_dst_path.exists():
                io.log_err('Training data dst directory does not exist.')
                break

            if not model_path.exists():
                model_path.mkdir(exist_ok=True)

            model = models.import_model(model_name)(
                model_path,
                training_data_src_path=training_data_src_path,
                training_data_dst_path=training_data_dst_path,
                debug=debug,
                device_args=device_args)

            is_reached_goal = model.is_reached_iter_goal()

            shared_state = {'after_save': False}
            loss_string = ""
            save_iter = model.get_iter()

            def model_save():
                if not debug and not is_reached_goal:
                    io.log_info("Saving....", end='\r')
                    model.save()
                    shared_state['after_save'] = True

            def send_preview():
                if not debug:
                    previews = model.get_previews()
                    c2s.put({
                        'op': 'show',
                        'previews': previews,
                        'iter': model.get_iter(),
                        'loss_history': model.get_loss_history().copy()
                    })
                else:
                    previews = [('debug, press update for new',
                                 model.debug_one_iter())]
                    c2s.put({'op': 'show', 'previews': previews})

            if model.is_first_run():
                model_save()

            if model.get_target_iter() != 0:
                if is_reached_goal:
                    io.log_info(
                        'Model already trained to target iteration. You can use preview.'
                    )
                else:
                    io.log_info(
                        'Starting. Target iteration: %d. Press "Enter" to stop training and save model.'
                        % (model.get_target_iter()))
            else:
                io.log_info(
                    'Starting. Press "Enter" to stop training and save model.')

            last_save_time = time.time()

            for i in itertools.count(0, 1):
                if not debug:
                    cur_time = time.time()

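                    # Each execute_programs entry is [trigger_time, program_text]; the program is exec()'d once after trigger_time seconds have elapsed.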
                    # Run user-scheduled programs once their start delay has elapsed,
                    # then disarm them by zeroing the stored delay.
                    for x in execute_programs:
                        prog_time, prog = x
                        if prog_time != 0 and (cur_time -
                                               start_time) >= prog_time:
                            x[0] = 0
                            try:
                                exec(prog)
                            except Exception as e:
                                print("Unable to execute program: %s, error: %s" % (prog, str(e)))

                    if not is_reached_goal:
                        iter, iter_time = model.train_one_iter()

                        loss_history = model.get_loss_history()
                        time_str = time.strftime("[%H:%M:%S]")
                        if iter_time >= 10:
                            loss_string = "{0}[#{1:06d}][{2:.5s}s]".format(
                                time_str, iter, '{:0.4f}'.format(iter_time))
                        else:
                            loss_string = "{0}[#{1:06d}][{2:04d}ms]".format(
                                time_str, iter, int(iter_time * 1000))

                        if shared_state['after_save']:
                            shared_state['after_save'] = False
                            last_save_time = time.time()  # update last_save_time only after save + one iter, because plaidML rebuilds programs after save: https://github.com/plaidml/plaidml/issues/274

                            mean_loss = np.mean([
                                np.array(loss_history[i])
                                for i in range(save_iter, iter)
                            ],
                                                axis=0)

                            for loss_value in mean_loss:
                                loss_string += "[%.4f]" % (loss_value)

                            io.log_info(loss_string)

                            save_iter = iter
                        else:
                            for loss_value in loss_history[-1]:
                                loss_string += "[%.4f]" % (loss_value)

                            if io.is_colab():
                                io.log_info('\r' + loss_string, end='')
                            else:
                                io.log_info(loss_string, end='\r')

                        if model.get_target_iter() != 0 and model.is_reached_iter_goal():
                            io.log_info('Reached target iteration.')
                            model_save()
                            is_reached_goal = True
                            io.log_info('You can use preview now.')

                # Autosave and push a fresh preview every save_interval_min minutes.
                if not is_reached_goal and (time.time() - last_save_time
                                            ) >= save_interval_min * 60:
                    model_save()
                    send_preview()

                # On the very first iteration, send an initial preview so the UI has something to show.
                if i == 0:
                    if is_reached_goal:
                        model.pass_one_iter()
                    send_preview()

                if debug:
                    time.sleep(0.005)

                # Process commands sent from the UI thread.
                while not s2c.empty():
                    input = s2c.get()
                    op = input['op']
                    if op == 'save':
                        model_save()
                    elif op == 'preview':
                        if is_reached_goal:
                            model.pass_one_iter()
                        send_preview()
                    elif op == 'close':
                        model_save()
                        i = -1
                        break

                if i == -1:
                    break

            model.finalize()

        except Exception as e:
            print('Error: %s' % (str(e)))
            traceback.print_exc()
        break
    c2s.put({'op': 'close'})
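A minimal sketch of how a worker like trainerThread above can be driven from a UI loop using the standard library queue and threading modules: s2c carries commands ('save', 'preview', 'close') to the trainer, and c2s carries previews plus the final 'close' message back. The args keys mirror the ones read at the top of the function; the concrete paths and model name are placeholders, not from the original project.

import queue
import threading

s2c = queue.Queue()  # UI -> trainer commands
c2s = queue.Queue()  # trainer -> UI previews / status

args = {
    'training_data_src_dir': 'workspace/data_src/aligned',  # placeholder paths
    'training_data_dst_dir': 'workspace/data_dst/aligned',
    'model_path': 'workspace/model',
    'model_name': 'SAE',                                     # placeholder model name
    'debug': False,
    'execute_programs': [],
}

thread = threading.Thread(target=trainerThread, args=(s2c, c2s, args, {}))
thread.start()

# UI side: ask for a preview, then request a clean shutdown (save + exit).
s2c.put({'op': 'preview'})
s2c.put({'op': 'close'})

while True:
    msg = c2s.get()
    if msg['op'] == 'close':  # trainerThread always sends this before exiting
        break

thread.join()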
Example #27
0
    def PCA(self,
            path=None,
            trained_model=None,
            reduced_dim=40,
            need_pca_result=False):
        '''
        Compare the original accuracy and the PCA-dimension-reduced accuracy.
        Here, the PCA-dimension-reduced results are represented in the original
        dimension basis, i.e. the dimension-reduced features are inverse-transformed
        back to the original space before classification.

        :param path: The path to the .pth checkpoint file.
        :param trained_model: If a trained model is already in memory, pass it here instead of loading it from path.
        :param reduced_dim: Reduce the feature dimension to reduced_dim with PCA.
        :param need_pca_result: Whether to sweep the number of PCA components and write the comparison to ./result/pca.csv; in that case nothing is returned.
        :return: pca_acc, ori_acc (only when need_pca_result is False)
            pca_acc: The accuracy of the PCA classifier on the validation set.
            ori_acc: The accuracy of the original neural-network classifier on the validation set.
        '''

        # Now feature is a (n, 512 * 7 * 7) matrix. Perform PCA on it to extract useful dimension information.
        trainset_features, gt, model = self.get_features(
            self.data_loader_train, path, trained_model)

        if need_pca_result:
            result_dir = './result/pca.csv'
            result = pd.DataFrame(
                columns=['n_features', 'pca_acc', 'original_acc'])

            for components in range(2, 100):
                for _ in range(10):
                    pca_acc = 0
                    ori_acc = 0
                    pca = decomposition.PCA(n_components=components)
                    features = trainset_features
                    pca.fit(features)

                    # Here each batch of features is an (n, 512 * 7 * 7) matrix that is projected by PCA
                    # and then reconstructed in the original space before classification.
                    with torch.no_grad():
                        for data in self.data_loader_valid:
                            x, y = data
                            y = np.argmax(y, axis=1)
                            x, y = Variable(x).to(self.device), \
                                   Variable(y).to(self.device)
                            batch_gt = y.data

                            batch_features = model.get_features(
                                x).cpu().numpy()
                            batch_features_pca = pca.transform(batch_features)
                            batch_features_pca = pca.inverse_transform(
                                batch_features_pca)

                            batch_features = torch.Tensor(batch_features).to(
                                self.device)
                            batch_features_pca = torch.Tensor(
                                batch_features_pca).to(self.device)

                            batch_outputs = model.get_classification(
                                batch_features)
                            batch_outputs_pca = model.get_classification(
                                batch_features_pca)
                            _, pred = torch.max(batch_outputs.data, 1)
                            _, pred_pca = torch.max(batch_outputs_pca.data, 1)
                            batch_acc = torch.sum(pred == batch_gt)
                            batch_acc_pca = torch.sum(pred_pca == batch_gt)
                            pca_acc += batch_acc_pca
                            ori_acc += batch_acc

                    sample_count = max(1, len(self.data_valid))
                    print('%03d, %.5f | %.5f' %
                          (components, pca_acc / sample_count,
                           ori_acc / sample_count))
                    result.loc[len(result)] = {
                        'n_features': components,
                        'pca_acc': (pca_acc / sample_count).item(),
                        'original_acc': (ori_acc / sample_count).item()
                    }

            result.to_csv(result_dir, float_format='%.3f', index=False)

        else:
            Log.log(Log.INFO, 'Start training PCA classifier...')
            pca_model = import_model('PCA')(reduced_dim,
                                            model.class_num).to(self.device)
            pca_epochs = 200
            pca = decomposition.PCA(n_components=reduced_dim)
            trainset_dim_reduced_features = pca.fit_transform(
                trainset_features)
            optimizer = torch.optim.Adam(pca_model.parameters())
            cost = torch.nn.CrossEntropyLoss()

            for epoch in range(pca_epochs):
                Log.log(Log.INFO, '-' * 20)
                Log.log(Log.INFO, 'Epoch %d/%d' % (epoch + 1, pca_epochs))

                x, y = trainset_dim_reduced_features, gt
                y = torch.from_numpy(np.argmax(y, axis=1))
                x, y = Variable(torch.Tensor(x)).to(self.device), \
                       Variable(y).to(self.device)
                outputs = pca_model(x)
                _, pred = torch.max(outputs.data, 1)

                optimizer.zero_grad()
                loss = cost(outputs, y)
                loss.backward()
                optimizer.step()
                epoch_loss = loss.data
                epoch_acc = torch.sum(y.data == pred)

                torch.cuda.empty_cache()

                Log.log(
                    Log.INFO,
                    'PCA Epoch [%02d] loss = %.5f, Epoch train acc = %.5f' %
                    (epoch + 1, epoch_loss / len(self.data_train),
                     epoch_acc / len(self.data_train)))

                if (epoch + 1) % 20 == 0 or epoch == pca_epochs - 1:
                    Log.log(Log.INFO, 'Start testing PCA classifier...')
                    pca_acc = 0
                    ori_acc = 0
                    with torch.no_grad():
                        for data in tqdm(self.data_loader_valid):
                            x, y = data
                            y = np.argmax(y, axis=1)
                            x, y = Variable(x).to(self.device), \
                                   Variable(y).to(self.device)
                            batch_size = x.shape[0]
                            features = model.get_features(x).cpu().numpy()
                            dim_reduced_features = pca.transform(features)
                            pca_outputs = pca_model(
                                Variable(
                                    torch.Tensor(dim_reduced_features)).to(
                                        self.device))
                            _, pca_pred = torch.max(pca_outputs.data, 1)
                            ori_outputs = model(x)
                            _, ori_pred = torch.max(ori_outputs.data, 1)
                            batch_pca_acc = torch.sum(y.data == pca_pred)
                            batch_ori_acc = torch.sum(y.data == ori_pred)
                            pca_acc += batch_pca_acc
                            ori_acc += batch_ori_acc

                    Log.log(
                        Log.INFO, 'PCA acc. %.3f, Original NN acc. %.3f.' %
                        (pca_acc / len(self.data_valid),
                         ori_acc / len(self.data_valid)))

            return pca_acc / len(self.data_valid), ori_acc / len(
                self.data_valid)
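The need_pca_result branch above boils down to a single comparison: classify validation features as-is versus after a PCA project-and-reconstruct round trip. Below is a self-contained sketch of that idea on synthetic data using scikit-learn, with a logistic-regression stand-in for the network's classification head; none of these names come from the class above.

import numpy as np
from sklearn.decomposition import PCA
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
X_train = rng.normal(size=(500, 64))                       # stand-in for trainset_features
y_train = (X_train[:, 0] + X_train[:, 1] > 0).astype(int)
X_valid = rng.normal(size=(200, 64))
y_valid = (X_valid[:, 0] + X_valid[:, 1] > 0).astype(int)

clf = LogisticRegression(max_iter=1000).fit(X_train, y_train)  # stand-in classifier head

pca = PCA(n_components=10).fit(X_train)
X_valid_recon = pca.inverse_transform(pca.transform(X_valid))  # back in the original basis

ori_acc = clf.score(X_valid, y_valid)
pca_acc = clf.score(X_valid_recon, y_valid)
print('original acc %.3f | PCA-reconstructed acc %.3f' % (ori_acc, pca_acc))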