def main(conf_name, gpu):
    # Initialize configs and prepare result dir with date
    if conf_name is None:
        conf = configs.Config()
    else:
        # Look the named configuration up on the configs module
        conf = getattr(configs, conf_name)
    res_dir = prepare_result_dir(conf)
    local_dir = os.path.dirname(__file__)

    # We take all png files that are not ground truth
    files = [file_path for file_path in glob.glob('%s/*.png' % conf.input_path)
             if not file_path[-7:-4] == '_gt']

    # Loop over all the files
    for file_ind, input_file in enumerate(files):

        # The ground-truth file, if it exists, is named like the input file with a _gt suffix
        ground_truth_file = input_file[:-4] + '_gt.png'
        if not os.path.isfile(ground_truth_file):
            ground_truth_file = '0'

        # Kernel files are named like the input file with a serial number per scale factor
        kernel_files = ['%s_%d.mat;' % (input_file[:-4], ind) for ind in range(len(conf.scale_factors))]
        kernel_files_str = ''.join(kernel_files)
        for kernel_file in kernel_files:
            if not os.path.isfile(kernel_file[:-1]):
                kernel_files_str = '0'
                print('no kernel loaded')
                break

        print(kernel_files)

        # This option uses all the gpu resources efficiently
        if gpu == 'all':

            # Poll until some GPU is available with at least half of its capacity free
            gpus = []
            while not gpus:
                gpus = GPUtil.getAvailable(order='memory')

            # Take the gpu with the most free memory
            cur_gpu = gpus[-1]

            # Run ZSSR from command line, open xterm for each run
            os.system("xterm -hold -e " + conf.python_path +
                      " %s/run_ZSSR_single_input.py '%s' '%s' '%s' '%s' '%s' '%s' alias python &"
                      % (local_dir, input_file, ground_truth_file, kernel_files_str, cur_gpu, conf_name, res_dir))

            # Verbose
            print('Ran file #%d: %s on GPU %d\n' % (file_ind, input_file, cur_gpu))

            # Wait 5 seconds for the previous process to start using its GPU. Without the wait, the GPU memory would
            # not yet be claimed, so all the processes would start on the same GPU at once and later crash.
            sleep(5)

        # The other option is just to run sequentially on a chosen GPU.
        else:
            run_ZSSR_single_input.main(input_file, ground_truth_file, kernel_files_str, gpu, conf_name, res_dir)
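A note on the availability check above: the while loop calls GPUtil.getAvailable in a tight loop until a GPU frees up. Below is a minimal sketch of a gentler polling helper; the name wait_for_free_gpu and the 10-second interval are assumptions for illustration, not part of the original script.

import time

import GPUtil


def wait_for_free_gpu(poll_seconds=10):
    # Poll until GPUtil reports at least one available GPU, then return the
    # list of device ids (sorted by memory usage when order='memory').
    while True:
        gpus = GPUtil.getAvailable(order='memory')
        if gpus:
            return gpus
        time.sleep(poll_seconds)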
Example #2
 def __init__(self, path: pathlib.Path):
     super().__init__()
     self._config = configs.Config(path)
     self._generator = GeneratorMock(self._config)
     self.generator_mock = self._generator
     self._rcon = ConsoleMock()
     self.console_mock = self._rcon
     self._map_painter = PainterMock()
Example #3
def main(input_img, ground_truth, kernels, gpu, conf_str, results_path):
    # Choose the wanted GPU (GPU selection is skipped in this variant; the gpu argument is unused)

    # 0 input for ground-truth or kernels means None
    ground_truth = None if ground_truth == '0' else ground_truth
    print('*****', kernels)
    kernels = None if kernels == '0' else kernels.split(';')[:-1]

    # Setup configuration and results directory
    conf = configs.Config()
    if conf_str is not None:
        conf = getattr(configs, conf_str)
    conf.result_path = results_path

    # Run ZSSR on the image
    net = ZSSR.ZSSR(input_img, conf, ground_truth, kernels)
    net.run()
Example #4
 def __init__(self,
              base_api,
              resource,
              dm_api,
              gcloud_stage,
              gcloud_flags=''):
     self.base_api = base_api
     self.gcloud_stage = gcloud_stage
     self.resource = resource
     self.dm_api = dm_api
     self.gcloud_flags = gcloud_flags
     self.base_yaml = {}
     self._set_yaml_base()
     self.properties = {}
     if configs.__file__[-3:] == 'pyc':
         path = configs.__file__[:-4]
     else:
         path = configs.__file__[:-3]
     self.config = configs.Config(path + ".yaml")
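The extension handling above special-cases '.pyc' before appending '.yaml'. A minimal alternative sketch using os.path.splitext, which covers both '.py' and '.pyc' in one call (shown only as an illustration, not as the original class's code):

import os

import configs

path, _ = os.path.splitext(configs.__file__)   # strips '.py' or '.pyc'
config = configs.Config(path + ".yaml")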
Example #5
def main(input_img, ground_truth, kernels, gpu, conf_str, results_path):
    # Choose the wanted GPU
    if gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = '%s' % gpu

    # 0 input for ground-truth or kernels means None
    ground_truth = None if ground_truth == '0' else ground_truth
    print('*****', kernels)
    kernels = None if kernels == '0' else kernels.split(';')[:-1]

    # Setup configuration and results directory
    if conf_str is not None:
        conf = getattr(configs, conf_str)
    else:
        conf = configs.Config()
    conf.result_path = results_path

    # Run ZSSR on the image
    net = ZSSR.ZSSR(input_img, conf, ground_truth, kernels)
    net.run()
Example #6
def main(input_img, ground_truth=None, kernels=None, gpu=None, conf_str=None, results_path='./'):
    # Choose the wanted GPU
    if gpu is not None:
        os.environ["CUDA_VISIBLE_DEVICES"] = '%s' % gpu

    # Kernels arrive as a ';'-separated string; split them into a list if given
    if kernels is not None:
        print('*****', kernels)
        kernels = kernels.split(';')[:-1]

    # Setup configuration and results directory
    conf = configs.Config()
    if conf_str is not None:
        conf = getattr(configs, conf_str)
    conf.result_path = results_path

    # Run ZSSR on the image
    net = ZSSR.ZSSR(input_img, conf, ground_truth, kernels)
    net.run()
Example #7
    def __init__(self):
        """Initialization interface."""
        self.config = configs.Config()
        with open('languages.dat', 'rb') as lang_file:
            lang_dict = pickle.load(lang_file)
            self.config.set_languages(lang_dict['languages'])
            self.phrases = configs.load(
                lang_dict[self.config.general_language])
        super().__init__(None, wx.ID_ANY, self.phrases.titles.caption)
        self.command = Commands(self)
        self.menu = Menu(self)

        self.panel = wx.Panel(self, wx.ID_ANY)
        sizer_panel = wx.BoxSizer(wx.HORIZONTAL)
        sizer_panel.Add(self.panel, 1, wx.EXPAND | wx.ALL)
        self.SetSizer(sizer_panel)

        self.CreateStatusBar()
        self.__create_widgets()
        self.__create_bindings()
        self.set_values()
Example #8
def main(input_img, ground_truth, kernels, gpu, conf_str, results_path):
    # Choose the wanted GPU (GPU selection is skipped in this variant; the gpu argument is unused)

    # 0 input for ground-truth or kernels means None
    ground_truth = None if ground_truth == '0' else ground_truth
    print('*****', kernels)
    kernels = None if kernels == '0' else kernels.split(';')[:-1]

    # Setup configuration and results directory
    conf = configs.Config()
    if conf_str is not None:
        conf = getattr(configs, conf_str)
    conf.result_path = results_path

    # Run ZSSR on the image
    # os.environ["OMP_NUM_THREADS"] = "1"
    torch.set_num_threads(20)
    net = ZSSR.ZSSR(input_img, conf, ground_truth, kernels)
    output, gt = net.run()
    error = np.mean((output - gt)**2)
    psnr = -10 * np.log10(error)
    print("Test Error {:.3f} PSNR {:.3f}".format(error, psnr))
    return error, psnr
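For reference, the PSNR printed in Example #8 is the special case of PSNR = 10 * log10(MAX^2 / MSE) with MAX = 1, i.e. images scaled to [0, 1]. A minimal standalone sketch of the same computation (the function name psnr and the data_range parameter are illustrative, not part of the example):

import numpy as np


def psnr(img, ref, data_range=1.0):
    # Peak signal-to-noise ratio in dB; with data_range=1.0 this reduces to
    # -10 * log10(MSE), matching the expression in Example #8.
    mse = np.mean((np.asarray(img, dtype=np.float64) - np.asarray(ref, dtype=np.float64)) ** 2)
    return 10 * np.log10(data_range ** 2 / mse)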
Example #9
def main(net_config, ckpt_for_init):
    
    ## load the config
    config = configs.Config(net_config)

    ## set the logger
    re_create_dir = False
    if ckpt_for_init == "":
        re_create_dir = True
    log_dir = helper.make_dir([config.log_dir], re_create_dir = re_create_dir)
    log_file = os.path.join(log_dir, config.net_config + '.info')
    logger = helper.Logger(log_file)
    logger.add(config.config_str, do_print=True)

    ## load the datasets from the csv file (train, val, feat_len)
    data = input_data.load_datasets(config.input_csv)  # exposes data.train.next_batch(n), data.val.features, data.val.output
    feat_len = data.feat_len
    
    ## set the input placeholders
    layer = 'input'
    with tf.name_scope(layer) as scope:
        x = tf.placeholder(tf.float32, [None, feat_len], name='input')
        y = tf.placeholder(tf.float32, [None, 1], name = 'output')
        keep_prob = tf.placeholder(tf.float32, name = 'keep_prob')

    ## call inference and compute the output
    y_ = deepnets.inference(config, input_tensors = {"x": x, "keep_prob": keep_prob})

    ## set the global step
    global_step = tf_utils.get_global_step()

    ## do training
    with tf.name_scope('training') as scope:
        train_loss = loss.compute_loss(est=y_, gt=y, loss_func= config.train_loss)
        train_summary =  tf.summary.scalar('train_loss', train_loss)
        # train_step = tf.train.GradientDescentOptimizer(LEARNING_RATE).minimize(train_cost)
        train_step = tf.train.AdamOptimizer(config.learning_rate).minimize(train_loss, global_step=global_step)
    
    ## tensors to compute the validation loss
    with tf.name_scope('validation') as scope:
        val_loss = loss.compute_loss(est=y_, gt=y, loss_func= config.test_loss)
        val_summary =  tf.summary.scalar('val_loss', val_loss)

    init_op = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init_op)

    ## saving and restoring operations
    restore_variables = tf_utils.get_model_varaibles() +\
                        tf.get_collection("GLOBAL_STEP")+\
                        tf.get_collection('BN_VARIABLES')
    saver = tf.train.Saver(restore_variables)
    step_init = tf_utils.restore_model(config, sess, restore_variables, ckpt_for_init, logger)

    
    # write the graph (both txt and binary)
    tf.train.write_graph(sess.graph_def, log_dir, config.net_config + '_graph.pb', as_text=False)
    tf.train.write_graph(sess.graph_def, log_dir, config.net_config + '_graph.txt', as_text=True)

    summary_writer = tf.summary.FileWriter(log_dir, sess.graph)

    # only save a checkpoint when the validation loss improves on the previously saved one
    last_saved_loss = float('inf')
    for step in range(step_init, config.max_steps):
        # do the optimisation 
        batch_x, batch_y = data.train.next_batch(config.batch_size)
        feed = {x: batch_x, y:batch_y, keep_prob: 0.6}
        _, t_loss, t_summary = sess.run([train_step, train_loss, train_summary], feed_dict=feed)
        summary_writer.add_summary(t_summary, step)

        # do the validation every 10th step
        if step % 10 == 0:
            feed = {x:data.val.features, y: data.val.output, keep_prob: 1.0}
            v_loss, v_summary = sess.run([val_loss, val_summary], feed_dict=feed)
            summary_writer.add_summary(v_summary, step)
        
        # save the model every 500th step
        if step % 500 == 0:
            logger.add('step {:05d} | train_loss {:f} |  val_loss {:f}'.format(step, t_loss, v_loss), do_print=True)
            if v_loss < last_saved_loss:
                checkpoint_path = os.path.join(log_dir,  config.net_config + '.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
                logger.save()
                last_saved_loss = v_loss
Example #10
 def config(self) -> configs.Config:
     """Конфигурация приложения"""
     if not self._config:
         self._config = configs.Config(pathlib.Path('./configs/conf.ini'))
     return self._config
Example #11
def conf():
    """
    Return the configuration.
    :return: the configuration
    """
    return configs.Config(pathlib.Path.cwd().parent / 'configs')
Example #12
    return args


"""
To launch tensorboard, open a Terminal window and run
tensorboard --logdir=results/
Then, from another machine, point a browser at
<ip-address-of-the-server>:6006
(6006 is the default port used by tensorboard).
"""
if __name__ == '__main__':

    args = parse_args()
    pprint.pprint(args.__dict__)

    config = configs.Config(**args.__dict__)

    if args.force and os.path.exists(config.output_path):
        shutil.rmtree(config.output_path)

    if os.path.exists(config.output_path) and not args.test:
        raise ValueError('{} already exists!'.format(config.output_path))

    if args.test:
        reduction = 1000
        config.saving_freq //= reduction
        config.nsteps_train //= reduction
        config.buffer_size //= reduction
        config.target_update_freq //= reduction
        config.eps_nsteps //= reduction
        config.learning_start //= reduction
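The test-mode block in Example #12 scales six config fields by the same factor. The same effect can be written as a loop over the field names (a sketch using only the names already shown in the snippet above):

reduction = 1000
for field in ('saving_freq', 'nsteps_train', 'buffer_size',
              'target_update_freq', 'eps_nsteps', 'learning_start'):
    setattr(config, field, getattr(config, field) // reduction)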
Example #13
def main(net_config, ckpt_for_init):
    
    ## load the config
    config = configs.Config(net_config)

    ## set the logger
    test_dir = os.path.join(config.log_dir, "test")
    log_dir = helper.make_dir([test_dir], re_create_dir = True)
    log_file = os.path.join(log_dir, config.net_config + '_test.txt')
    csv_file = os.path.join(log_dir, config.net_config + '_test.csv')
    logger = helper.Logger(log_file)
    logger.add(config.config_str, do_print=True)

    ## load the datasets from the csv file (train, val, feat_len)
    data = input_data.load_datasets(config.input_csv)  # exposes data.train.next_batch(n), data.val.features, data.val.output
    feat_len = data.feat_len

    ## set the input placeholders
    layer = 'input'
    with tf.name_scope(layer) as scope:
        x = tf.placeholder(tf.float32, [None, feat_len], name='input')
        y = tf.placeholder(tf.float32, [None, 1], name = 'output')
        keep_prob = tf.constant(1.0, name = 'keep_prob')

    ## call inference and compute the output
    y_ = deepnets.inference(config, input_tensors = {"x": x, "keep_prob": keep_prob})

    ## set the global step
    global_step = tf_utils.get_global_step()

    ## tensors to compute the validation loss
    with tf.name_scope('validation') as scope:
        val_loss = loss.compute_loss(est=y_, gt=y, loss_func= config.test_loss)
        val_summary =  tf.summary.scalar('val_loss', val_loss)

    init_op = tf.initialize_all_variables()
    sess = tf.Session()
    sess.run(init_op)

    ## saving and restoring operations
    restore_variables = tf_utils.get_model_varaibles() +\
                        tf.get_collection("GLOBAL_STEP")+\
                        tf.get_collection('BN_VARIABLES')
    saver = tf.train.Saver(restore_variables)
    step_init = tf_utils.restore_model(config, sess, restore_variables, ckpt_for_init, logger)

    summary_writer = tf.summary.FileWriter(log_dir, sess.graph)

    # do the validation
    features = np.concatenate((data.train.features, data.val.features), axis=0)
    output = np.concatenate((data.train.output, data.val.output), axis=0)
    feed = {x: features, y: output}
    est, v_loss, v_summary = sess.run([y_, val_loss, val_summary], feed_dict=feed)

    # input_headers  = [x.encode('latin1') for x in data.input_header]
    headers = ','.join(data.input_header) + ", gt-y, est-y"

    vals = np.concatenate((features, output, est), axis=1)
    
    # duplicate the output column's mu and sigma so the appended estimate column is de-standardized with the same statistics
    mu = np.append(data.mu, data.mu[-1])
    sigma = np.append(data.sigma, data.sigma[-1])

    # reverse the standardization applied to vals
    vals = np.add(vals * sigma, mu)

    np.savetxt(csv_file, vals, header=headers, delimiter=",")
    summary_writer.add_summary(v_summary, step_init)
    logger.add('val_loss {:f}'.format(v_loss), do_print=True)
    logger.save()
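The de-standardization in Example #13 inverts the usual (x - mu) / sigma transform column by column. A tiny self-contained check of that round trip (the toy data and statistics below are made up purely for illustration):

import numpy as np

rng = np.random.default_rng(0)
raw = rng.normal(size=(5, 3))                  # toy stand-in for the feature table
mu, sigma = raw.mean(axis=0), raw.std(axis=0)

standardized = (raw - mu) / sigma              # forward transform assumed by the example
recovered = np.add(standardized * sigma, mu)   # the inverse used in Example #13
assert np.allclose(recovered, raw)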
Example #14
    return args


"""
To launch tensorboard, open a Terminal window and run
tensorboard --logdir=results/
Then, from another machine, point a browser at
<ip-address-of-the-server>:6006
(6006 is the default port used by tensorboard).
"""
if __name__ == '__main__':
    args = parse_args()
    config = configs.Config(
        env_name=args.env_name,
        run_id=42,
        model_name='human',
        explore_name='human',
        batch=False
    )

    # make env
    env = gym.make(config.env_name)
    if not args.full:
        env = MaxAndSkipEnv(env, skip=config.skip_frame)
        env = PreproWrapper(
            env,
            prepro=greyscale,
            shape=(80, 80, 1),
            overwrite_render=config.overwrite_render
        )
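A quick smoke test of the wrapped environment built in Example #14, assuming the classic gym API this code base uses (reset() returns an observation, step() returns a 4-tuple); the actions are random and only the observation shape is checked:

obs = env.reset()
for _ in range(10):
    obs, reward, done, info = env.step(env.action_space.sample())
    if done:
        obs = env.reset()
print('observation shape:', obs.shape)   # (80, 80, 1) after PreproWrapper, i.e. when --full is not set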
Example #15
def main(conf_name, gpu):
    # Train on the first frame of the video, then run inference on the rest of
    # the frames. Optionally, ZSSR can also be run on the last frame to show
    # the delta from the beginning to the end.

    if conf_name is None:
        conf = configs.Config()
    elif conf_name == "X2_REAL_CONF_VIDEO":
        conf = configs.X2_REAL_CONF_VIDEO
    # elif conf_name == "X2_GRADUAL_IDEAL_CONF_VIDEO":
    #     conf = configs.X2_GRADUAL_IDEAL_CONF_VIDEO
    else:
        raise ValueError('unknown configuration name: %s' % conf_name)

    res_dir = prepare_result_dir(conf)
    local_dir = os.path.dirname(__file__)

    files = [
        file_path
        for file_path in glob.glob('%s/*.%s' %
                                   (conf.input_path, conf.input_file_ext))
        if not file_path[-7:-4] == '_gt'
    ]

    print("locations:", res_dir, local_dir)
    print("files:", files)

    for file_ind, input_file in enumerate(files):

        conf.name = input_file[:-4] + "_frame_1_2x2"

        vidcap = cv2.VideoCapture(input_file)

        video_length = int(vidcap.get(cv2.CAP_PROP_FRAME_COUNT)) - 1
        print("Number of frames: ", video_length)

        # frame 1:
        success, frame_one = vidcap.read()

        # train on frame one

        # TODO: move this conversion to run_ZSSR_single_input
        converted_frame_one = cv2.cvtColor(frame_one, cv2.COLOR_BGR2RGB)
        converted_frame_one = cv2.normalize(converted_frame_one,
                                            None,
                                            0,
                                            1,
                                            cv2.NORM_MINMAX,
                                            dtype=cv2.CV_32F)
        print(converted_frame_one)

        # TODO: not implementing ground truth at the moment.

        ground_truth_file = '0'
        image_size = frame_one.shape

        # OpenCV expects (width, height), so reverse the numpy (height, width) order
        new_image_size = (image_size[1] * 2, image_size[0] * 2)
        print("New Image Size:", new_image_size)

        fps = vidcap.get(cv2.CAP_PROP_FPS)
        print("Video FPS:", fps)

        # TODO: not implementing kernels at the moment.

        # TODO: clustering for scene detection (?)
        # or use final_test() to check if you need to retrain the net.

        kernel_files_str = '0'
        net = run_ZSSR_single_input.main(converted_frame_one,
                                         ground_truth_file, kernel_files_str,
                                         gpu, conf, res_dir)

        video_name = input_file[:-4] + "_2x2" + ".mp4"
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")

        new_vid = cv2.VideoWriter(video_name, fourcc, fps, new_image_size)

        count = 0
        image = None
        image_temp = frame_one

        while success:
            image = image_temp
            # convert to float32:
            image = cv2.normalize(image,
                                  None,
                                  0,
                                  1,
                                  cv2.NORM_MINMAX,
                                  dtype=cv2.CV_32F)
            image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

            # Upscale the frame with the trained network's forward_pass() (defined in ZSSR.py)
            scaled_image = net.forward_pass(image)

            print('Inference on Frame: ', count, scaled_image.shape)

            if count % 100 == 0:
                print(scaled_image)

            # convert back to 8-bit BGR so the frame can be written to new_vid
            scaled_image = cv2.normalize(scaled_image,
                                         None,
                                         0,
                                         255,
                                         cv2.NORM_MINMAX,
                                         dtype=cv2.CV_8U)
            scaled_image = cv2.cvtColor(scaled_image, cv2.COLOR_RGB2BGR)

            # writing to new video
            new_vid.write(scaled_image)

            success, image_temp = vidcap.read()
            count += 1

        new_vid.release()

        # train on the last frame
        conf.name = input_file[:-4] + "_frame_last_2x2"
        run_ZSSR_single_input.main(image, ground_truth_file, kernel_files_str,
                                   gpu, conf, res_dir)
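The per-frame colour and dtype conversions in Example #15 appear twice (into the network and back out to the video writer). A small sketch that factors them into helpers, using the exact same cv2 calls as the loop above (the helper names are made up for illustration):

import cv2


def to_net_input(frame_bgr):
    # BGR uint8 frame -> RGB float32 in [0, 1], as fed to the network above
    rgb = cv2.cvtColor(frame_bgr, cv2.COLOR_BGR2RGB)
    return cv2.normalize(rgb, None, 0, 1, cv2.NORM_MINMAX, dtype=cv2.CV_32F)


def to_video_frame(output_rgb):
    # RGB float output -> BGR uint8 frame, as written to new_vid above
    frame_u8 = cv2.normalize(output_rgb, None, 0, 255, cv2.NORM_MINMAX, dtype=cv2.CV_8U)
    return cv2.cvtColor(frame_u8, cv2.COLOR_RGB2BGR)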
Example #16
    parser.add_argument("--mapping",
                        type=bool,
                        help="create index with mapping and setting up es",
                        default=False)
    parser.add_argument("--delete",
                        type=bool,
                        help="delete data and index",
                        default=False)
    parser.add_argument("--data",
                        type=bool,
                        help="add simple data_files in ES",
                        default=False)
    args = parser.parse_args()

    # read the configuration settings
    conf = configs.Config()

    # waiting connection
    waiting_connection_with_es(conf.elastic_host, conf.elastic_port)

    # configuration logger
    if args.verbose:
        log_level = logging.INFO
    else:
        log_level = logging.ERROR

    ROOT_LOGGER.setLevel(log_level)
    ROOT_LOGGER.addHandler(
        logging.FileHandler(os.path.join(DISTRO_ROOT_PATH, 'logs', 'log.txt')))
    ROOT_LOGGER.info("\nBeginning at: {}".format(datetime.datetime.now()))
Example #17
import pathlib

import Mykytea
import flask
import flask_api.status
import flask_classy

import configs

# Author
__author__ = 'Masaya Suzuki'

# Version
__version__ = '0.1.6'

# Configuration
conf = configs.Config(pathlib.Path.cwd().parent / 'configs')

app = flask.Flask(__name__, conf.get('general', 'front', 'url'),
                  conf.get('general', 'front', 'dir path'))


def output_http_data(headers, body):
    """
    HTTPデータ (リクエストやレスポンス) の内容を出力する
    :param headers: HTTPデータ (リクエストやレスポンス) のheader
    :param body: HTTPデータ (リクエストやレスポンス) のbody
    """
    app.logger.debug('[Header]')

    for header in headers:
        app.logger.debug('{}: {}'.format(*header))