Example no. 1
    def reward_mask_height(self,
                           mask,
                           pose,
                           done_thr,
                           success_thr,
                           factor=100,
                           right_shift_one=1,
                           right_shift_two=1.5,
                           stretch_one=9,
                           stretch_two=2,
                           scale=300,
                           stretch=3000):
        done = False
        success = False
        reward = 0
        reward_fov, fov_score = self.reward_mask(mask, factor, right_shift_one,
                                                 right_shift_two, stretch_one,
                                                 stretch_two)
        reward_height = self.reward_height(pose, scale, stretch)

        reward = reward_fov + reward_height

        # Terminal step: once below the done threshold, grant a success
        # bonus or a failure penalty.
        if pose[2] < done_thr:
            done = True
            if fov_score > success_thr:
                reward += 500
                log.warn("SUCCESS")
                success = True
            else:
                reward -= 500

        log.warn("Reward Total: {}".format(reward))
        return reward, done, success
Example no. 2
 def start(self):
     if not self.running:
         super(Worker, self).start()
         self.process = psutil.Process(self.pid)
         self.running = True
     else:
         log.warn('Worker %d: cannot start more than once' % self.id)
Example no. 3
    def init_config(self, config_file: str) -> None:
        # Load configuration file from disk
        config_file_path = Path(config_file)
        log.check(
            config_file_path.exists(),
            message=f'Config file "{config_file_path}" doesn\'t exist',
        )
        with config_file_path.open() as config_file_fh:
            try:
                config_raw = yaml.safe_load(config_file_fh)
            except yaml.YAMLError as exc:
                log.error(
                    f'Configuration file "{config_file_path}" parse error\n{exc}'
                )
                return

        # Set the config file we loaded
        self.config.config_file = str(config_file_path)
        # Load config from file
        for key, val in config_raw.items():
            # Don't load non_file_fields
            if key in self.config.non_file_fields:
                continue
            try:
                getattr(self.config, key)
            except AttributeError:
                # Warn when rubbish config is loaded
                log.warn(f'Loaded unknown configuration "{key}"')
            setattr(self.config, key, val)
        print("Finished Loading Config")
        return
Example no. 4
    def _get_bv(self, vertex):
        """
        Build the b vector for the optimization constraints A * x = b

        Parameters
        ----------
        vertex : Vertex containing the constraints to translate into b

        Return
        ------
        bv : dim x nb_constraints Numpy array

        """
        const_number = vertex.get_constraints_number()

        if (const_number > self.max_derivative_to_optimize + 1):
            log.warn("Too many constraints, the higher ones will be ignored.")

        bv = np.zeros((self.dim, const_number))

        for c in range(const_number):
            const = vertex.get_constraint(c)
            for d in range(self.dim):
                bv[d, c] = const[d]

        return bv
Example no. 5
    def eval_one(self, t_i, derivative):
        """
        Evaluate one trajectory derivative at a single time point.

        Parameters
        ----------
        t_i : scalar time point
        derivative : trajectory derivative to evaluate

        Return
        ------
        val : dim x 1 Numpy array with the trajectory evaluation

        """
        time_stamp = [
            sum(self.time_segment[:i])
            for i in range(len(self.time_segment) + 1)
        ]

        # The bound check and segment lookup do not depend on the
        # dimension, so do them once outside the loop.
        if t_i < time_stamp[0] or t_i > time_stamp[-1]:
            log.warn("Eval of t: out of bound. Extrapolation.")
        m = self._find_pts_seg(t_i)

        val = np.zeros((self.dim, 1))
        for dd in range(self.dim):
            val[dd, 0] = np.dot(
                self._build_time_vect(t_i - time_stamp[m], derivative).T,
                self.polyCoeffSet[dd, :, m])

        return val
Example no. 6
    def _build_Q_sub(self, derivative_to_optimise):
        """
        Set the sub-cost matrix Q for one segment

        Parameters
        ----------
        derivative_to_optimise : target derivative for Q sub-matrix generation

        Return
        ------
        Q matrix : (poly_deg + 1) x (poly_deg + 1)

        """
        Q = np.zeros((self.N + 1, self.N + 1))

        if derivative_to_optimise > self.N:
            log.warn("Order of derivative > poly order, return zeros-matrix.")
            return Q

        for i in range(derivative_to_optimise, self.N + 1):
            for j in range(derivative_to_optimise, self.N + 1):
                Q[i, j] = (2 * self._get_nth_coeff(i, derivative_to_optimise)
                             * self._get_nth_coeff(j, derivative_to_optimise)
                             / (i + j - 2 * derivative_to_optimise + 1))

        return Q
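The Q entries above are the closed form of the integral of a squared polynomial derivative over a unit-time segment. The standalone sketch below checks that formula against numerical integration; it writes out the falling-factorial coefficient explicitly, which is an assumption about what the internal _get_nth_coeff returns.

import numpy as np
from scipy.integrate import quad

def nth_coeff(i, r):
    # Falling factorial i * (i - 1) * ... * (i - r + 1): the coefficient
    # picked up by differentiating t**i exactly r times.
    out = 1.0
    for k in range(r):
        out *= i - k
    return out

def build_Q(N, r):
    # Q[i, j] = 2 * c_i * c_j / (i + j - 2r + 1), so that a^T Q a equals
    # 2 * integral_0^1 (d^r/dt^r p(t))^2 dt for p(t) = sum_i a_i t^i.
    # The factor 2 matches the 1/2 x^T Q x convention of QP solvers
    # (an assumption about the surrounding optimizer).
    Q = np.zeros((N + 1, N + 1))
    for i in range(r, N + 1):
        for j in range(r, N + 1):
            Q[i, j] = 2 * nth_coeff(i, r) * nth_coeff(j, r) \
                        / (i + j - 2 * r + 1)
    return Q

N, r = 5, 2
a = np.random.rand(N + 1)   # random polynomial coefficients
deriv = np.polynomial.polynomial.polyder(a, r)
integral, _ = quad(
    lambda t: np.polynomial.polynomial.polyval(t, deriv) ** 2, 0, 1)
assert np.isclose(a @ build_Q(N, r) @ a, 2 * integral)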
Example no. 7
    def _get_angular_error_loss(net, loss_weights, groundtruths, weights=None):
        """ Returns scaled angular error losses. """
        losses = []
        log.check_eq(len(groundtruths), len(loss_weights))
        log.check_eq(len(net.estimator_net), len(loss_weights) - 1, \
            "You do not have an appropriate number of loss weights.")
        RELATIVE_WEIGHT = 5.0

        with tf.name_scope('angular_loss'):
            for i, w in enumerate(loss_weights):
                if i < len(loss_weights) - 1:
                    prediction = net.estimator_net[i].get_flow()
                else:
                    if net.options.use_context_net is False:
                        log.warn(
                            'Context network is not set up, so there is no ' +
                            'need to penalize flow at the finest resolution.')
                        break
                    prediction = net.get_output_flow()

                dim = prediction.shape.as_list()[1]
                gt_at_scale = groundtruths[dim]
                loss_name = '{}x{}'.format(dim, dim)

                log.check_eq(gt_at_scale.shape.as_list()[1],
                             prediction.shape.as_list()[1])
                log.check_eq(gt_at_scale.shape.as_list()[2],
                             prediction.shape.as_list()[2])

                loss = tf_utils.angular_loss_at_scale(
                    prediction, gt_at_scale, weights) * w * RELATIVE_WEIGHT
                tf.summary.scalar(loss_name, loss)
                losses.append(loss)
        return losses
Example no. 8
    def __init__(self, db_file, readonly=True, echo=True, append=False):
        self._db_file = db_file
        self._echo = echo
        # If the time since the last commit exceeds this interval
        # (in seconds), the buffer will be committed.
        self._max_commit_interval = 60
        # The timer
        self._timer = timer_lib.timer(start=True)
        self._color = color_lib.color(bold=True)
        self._err = self._color.red('ERROR') + ': '
        self._warn = self._color.yellow('WARNING') + ': '
        self._info = self._color.green('INFO') + ': '

        # Define the dumper of the key and value
        self._key_dumper = None
        self._val_dumper = None
        self._key_parser = None
        self._val_parser = None
        # The size of the commit buffer
        self._buf_size = 100
        self._buf_counter = 0
        self._cur = None
        self._iter = None
        # Cached list of the keys in the db
        self._key_list = []
        # If true, need to regenerate the key list
        self._dirty_key_list = True

        if readonly:
            if not path_tools.check_path(self._db_file, False):
                log.error(self._err + 'Cannot find ' +
                          self._color.yellow(self._db_file))
                return
            self._db = plyvel.DB(self._db_file)
        else:
            if path_tools.check_path(self._db_file, False):
                print('%s %s already exists.'
                      % (self._warn, self._color.red(self._db_file)))
                k = raw_input('Do you want to delete it?[y/n]:')
                if k == 'y' or k == 'Y':
                    log.warn('Delete the %s file' % self._db_file)
                    shutil.rmtree(self._db_file)
                elif k == 'n' or k == 'N':
                    log.warn('Keep the %s file, and new entries will be added'
                             % self._db_file)
                else:
                    log.error('Wrong key input, exit the program')
                    sys.exit(2)

            self._db = plyvel.DB(self._db_file, create_if_missing=True)

        if self._db is None:
            log.error(self._err + 'Cannot open the db file ' +
                      self._color.yellow(self._db_file))
            return

        if echo:
            log.info(self._info + 'Open ' +
                     self._color.yellow(self._db_file))
Example no. 9
    def run(self, train_loader, test_loader, loss_fn):
        
        try:
            from visualize import make_dot
            y = self.net.forward(Variable(torch.from_numpy(test_loader.dataset[0]['image'])))
            g = make_dot(y)
            g.engine='dot'
            g.format='pdf'
            print(g.render(filename=os.path.join(self.log_dir, 'net.gv')))
        except Exception:
            logger.warn('failed to draw net.')
        

        logger.check_eq(self.done, False, 'Done already!')
        if self.cuda:
            self.net.cuda()

        logger.info('Network Architecture:')
        print(str(self.net))
        sys.stdout.flush()

        logger.info('{} Hyperparameters:'.format(self.solver.__class__.__name__))
        print(str(self.solver.defaults))
        sys.stdout.flush()

        logger.info('Initial test with random initialized parameters:')
        self.test(epoch=0, loader=test_loader, loss_fn=loss_fn)
        for epoch in range(1, self.total_epochs+1):
            self.train(epoch=epoch, loader=train_loader, loss_fn=loss_fn)
            self.test(epoch=epoch, loader=test_loader, loss_fn=loss_fn)
            self.invoke_epoch_callback()
        self.save_stats()
        self.done=True
Example no. 10
def parse_classifier_xml(config):
    """This function load the classifier xml file and parse the content
    a dict will be returned. The key is the int class number: 0~N
    the corresponding val is a list: [min_focal, max_focal]. e.g.
    {0: [10. 24], 1: [35, 50]. 2: [100. 200]}
    NOTE: All return values are int type

    If error, return None
    """
    try:
        root = et.parse(config).getroot()
    except Exception:
        log.error('%s can not be parsed' % config)
        return None
    try:
        classes = root.findall('class')
    except Exception:
        log.error('Can not find class section in %s' % config)
        return None
    rst = {}
    for cl in classes:
        try:
            class_id = int(cl.get('id'))
            class_min_focal = int(cl.get('minfocal'))
            class_max_focal = int(cl.get('maxfocal'))
            if class_max_focal < class_min_focal:
                log.warn('maxfocal must be larger than minfocal in %s' %
                         et.tostring(cl))
        except (TypeError, ValueError):
            log.error('Error in parsing %s' % et.tostring(cl))
            return None
        rst[class_id] = [class_min_focal, class_max_focal]
    return rst
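For reference, a minimal sketch of the XML layout the parser above expects; the file name and focal values are made up for illustration.

# classifier.xml (hypothetical):
# <config>
#     <class id="0" minfocal="10" maxfocal="24"/>
#     <class id="1" minfocal="35" maxfocal="50"/>
# </config>
classes = parse_classifier_xml('classifier.xml')
# -> {0: [10, 24], 1: [35, 50]}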
Example no. 11
 def invoke_epoch_callback(self):
     if len(self.epoch_callbacks) > 0:
         for ith, cb in enumerate(self.epoch_callbacks):
             try:
                 cb()
             except Exception:
                 logger.warn('epoch_callback[{}] failed.'.format(ith))
Example no. 12
def msr_init(net):
    """
    MSR style initialization
    :param net:
    :return:
    """
    glog.info('initialization with MSR approach')
    try:
        for layer in net:
            if type(layer) == nn.Conv2d:
                n = layer.kernel_size[0]*layer.kernel_size[1]*layer.out_channels
                layer.weight.data.normal_(0, math.sqrt(2./n))
                layer.bias.data.zero_()
            elif type(layer) == nn.BatchNorm2d:
                layer.weight.data.fill_(1)
                layer.bias.data.zero_()
            elif type(layer) == nn.Linear:
                layer.bias.data.zero_()
    except TypeError:
        glog.warn('input argument is not iterable ... treating it as a single module')
        if type(net) == nn.Conv2d:
            n = net.kernel_size[0]*net.kernel_size[1]*net.out_channels
            net.weight.data.normal_(0, math.sqrt(2./n))
            net.bias.data.zero_()
        elif type(net) == nn.BatchNorm2d:
            net.weight.data.fill_(1)
            net.bias.data.zero_()
        elif type(net) == nn.Linear:
            net.bias.data.zero_()
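msr_init iterates over its argument, so it accepts anything iterable over layers (e.g. an nn.Sequential), while a plain module lands in the TypeError fallback. A small usage sketch, assuming PyTorch is available:

import torch.nn as nn

net = nn.Sequential(
    nn.Conv2d(3, 16, kernel_size=3, padding=1),
    nn.BatchNorm2d(16),
    nn.Conv2d(16, 32, kernel_size=3, padding=1),
)
msr_init(net)                 # iterable: every recognized layer is initialized
msr_init(nn.Linear(32, 10))   # single module: handled by the TypeError fallback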
Example no. 13
    def on_call_back(self, ch, method, props, body):
        job = Job.from_json(body.decode('ascii'))
        self.job_status.remove(job.id)

        if job.consumer is not None:
            # Send job to the following module.
            conf = {}
            for module in self.conf['modules']:
                if module['name'] == job.consumer:
                    conf = module
                    break

            for params in self.expand_params(conf):
                job.params = params
                job.id = self.cur_job_id
                self.cur_job_id += 1
                self.send_job(job.consumer, job)

            ch.basic_ack(delivery_tag=method.delivery_tag)

        else:
            # Job completed.
            log.info('Job ' + str(job.id) + ' completed.')
            #log.debug(job)
            ch.basic_ack(delivery_tag=method.delivery_tag)

            # On receiving the last job.
            if job.id == self.n_jobs - 1:

                # Wait until every outstanding job has finished.
                while len(self.job_status) > 0:
                    #log.debug(self.job_status)
                    time.sleep(1)

                # Shut down the pipeline.
                for module in self.conf['modules']:
                    for _ in range(module['instances']):
                        self.send_command(module['name'], -1, 'shutdown')
                self.send_command('logger', -1, 'shutdown')


                log.warn('All jobs are completed, shutting down the pipeline')
                self.channel.stop_consuming()

                if self.clean_up:
                    log.warn('Cleaning up intermediate files')
                    if self.use_mongodb:
                        fs = gridfs.GridFS(
                            pymongo.MongoClient(self.mongodb_host).boom
                            )
                        for grid_out in fs.find({"metadata": self.output_base}, no_cursor_timeout=True):
                            fs.delete(grid_out._id)
                    else:
                        os.system('rm ' + self.output_base + '/*.json')
                quit()
Example no. 14
def close(db):
    """Close an opened lmdb
    """
    if db is not None:
        try:
            db.close()
        except Exception:
            log.error('\033[0;31mClose lmdb error\033[0m')
    else:
        log.warn('\033[1;33mDB handle is None\033[0m')
Example no. 15
    def receive_job(self, ch, method, properties, body):

        ## Parse request body.
        data = json.loads(body.decode('ascii'))
        if data['type'] == 'job':
            ch.basic_ack(delivery_tag=method.delivery_tag)
            self.channel.stop_consuming()
            job = Job.from_json(data['body'])
            log.info(self.name + ' received job: ' + str(job.id))

            ## Load data.
            data = self.load_job_data(job)

            ## Process data.
            data = self.process(job, data)

            ## Update job info.
            job.producer = self.name
            job.consumer = self.output_module
            job.output_path += self.name + '_' + json.dumps(job.params) + '_'
            if job.config is None:
                job.config = [{'module': self.name, 'params': job.params}]
            else:
                job.config.append({
                    'module': self.name,
                    'params': job.params
                })

            ## Save data and update the data uri.
            job.input_uri = self.save_job_data(job, data)

            ## Update timestamp and processing time.
            job.update_timestamp()

            ## Connect
            self.connect()
            ## Send back resulting job.
            self.channel.basic_publish(exchange='job',
                                       routing_key=properties.reply_to,
                                       properties=pika.BasicProperties(),
                                       body=job.to_json())

            log.info(self.name + ' sent back job: ' + str(job.id))

        elif data['type'] == 'command':
            cmd = json.loads(data['body'])
            ch.basic_ack(delivery_tag=method.delivery_tag)
            if cmd['module'] == self.id or cmd['module'] == -1:
                log.warn('Module ' + str(self.id) + ' ' + self.name +
                         ' received command ' + cmd['command'])

                if cmd['command'] == 'shutdown':
                    self.channel.stop_consuming()
                    self.is_finished = True
                    self.cleanup()
Example no. 16
 def set_op_type(self, op_type):
     if op_type in Supported_Op_Type._value2member_map_:
         self.op_type = op_type
     elif op_type in Might_Support_Op_Type._value2member_map_:
         self.op_type = op_type
         log.warn('Op type {} might be supported'.format(op_type))
     elif op_type in Mir_Op_Type._value2member_map_:
         self.op_type = op_type
     else:
         self.op_type = op_type
         log.warn('Unsupported op type {}'.format(op_type))
Example no. 17
 def set_param_name(self, param_name, graph_inputs=None):
     if graph_inputs and param_name in graph_inputs:
         self.param_name = 'input/' + str(param_name)
         return
     if self.op_type == 'Const':
         self.param_name = 'params/' + str(param_name)
     else:
         log.warn('param_name is only set on Const (params) nodes, '
                  'not on node {} ({})'.format(self.name, self.op_type))
Example no. 18
def update_dict(target_dict, new_dict, validate_item=None):
    for key, value in new_dict.items():
        if validate_item:
            validate_item(key, value)
        if key not in target_dict:
            log.warn('Skipping unexpected key in config: {}'.format(key))
            continue
        if isinstance(target_dict[key], dict) and \
                isinstance(value, dict):
            update_dict(target_dict[key], value, validate_item=validate_item)
        else:
            target_dict[key] = value
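A quick sketch of the recursive merge above: nested dicts are merged key by key, and keys missing from the target are skipped with a warning (the config keys here are made up for illustration).

target = {'optimizer': {'lr': 0.1, 'momentum': 0.9}, 'epochs': 10}
update_dict(target, {'optimizer': {'lr': 0.01}, 'epochz': 20})
# warns: Skipping unexpected key in config: epochz
# target is now {'optimizer': {'lr': 0.01, 'momentum': 0.9}, 'epochs': 10}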
Example no. 19
    def set_weight_mask(self, weights):
        """
        Set weight mask for derivation importance in cost matrix

        Parameters 
        ----------
        weight : 1-D Numpy array of size derivative_to_optimize + 1
 
        """
        if weights.shape[0] > self.N:
            log.warn("To many weights, the higher terms will be ignored.")
            self.weight_mask = weights[:self.derivative_to_optimize]
        else:
            self.weight_mask = weights
Example no. 20
def get_default_config():
    config_file = osp.join(here, 'default_config.yaml')
    with open(config_file) as f:
        config = yaml.safe_load(f)

    # save default config to ~/.labelmerc
    user_config_file = osp.join(osp.expanduser('~'), '.labelmerc')
    if not osp.exists(user_config_file):
        try:
            shutil.copy(config_file, user_config_file)
        except Exception:
            log.warn('Failed to save config: {}'.format(user_config_file))

    return config
Example no. 21
    def _get_endpoint_error_loss(net,
                                 loss_weights,
                                 groundtruths,
                                 weights=None,
                                 loss_type=None):
        """
        Returns endpoint error loss. Options are:
         - L2
         - Huber
         - L2 weighted by uncertainty, like eqn 8 in:
           https://arxiv.org/pdf/1703.04977.pdf
        """
        losses = []
        log.check_eq(len(groundtruths), len(loss_weights))
        log.check_eq(len(net.estimator_net) + 1, len(loss_weights), \
            ("You do not have an appropriate number of loss weights. "
             "Should have {}".format(1 + len(net.estimator_net))))
        with tf.name_scope('endpoint_loss'):
            for i, w in enumerate(loss_weights):
                if i < len(loss_weights) - 1:
                    prediction = net.estimator_net[i].get_flow()
                else:
                    if net.options.use_context_net is False:
                        log.warn(
                            'Context network is not set up, so there is no ' +
                            'need to penalize flow at the finest resolution.')
                        break
                    prediction = net.get_output_flow()

                dim = prediction.shape.as_list()[1]
                loss_name = '{}x{}'.format(dim, dim)

                gt_at_scale = groundtruths[dim]
                log.check_eq(gt_at_scale.shape.as_list()[1],
                             prediction.shape.as_list()[1])
                log.check_eq(gt_at_scale.shape.as_list()[2],
                             prediction.shape.as_list()[2])

                if loss_type == 'HUBER':
                    loss = tf_utils.endpoint_huber_loss_at_scale(
                        prediction, gt_at_scale, weights) * w
                elif loss_type == 'L2':
                    loss = tf_utils.endpoint_loss_at_scale(
                        prediction, gt_at_scale, weights) * w
                else:
                    log.fatal("Unrecognized loss type -- should specify "
                              "{'HUBER', 'L2' 'WEIGHTED'}.")
                tf.summary.scalar(loss_name, loss)
                losses.append(loss)
        return losses
Example no. 22
    def start(self):
        """
        Start the threads, which keep monitor the inqueue and try to convert
        array to datum and to string
        """
        # Check
        if self.func_seg_image is self._ncut_seg and self.thread_num != 1:
            log.warn('\033[0;33mWARNING\033[0m: In Ncut mode, '
                     'multithreading might cause problems.')
        # Start the worker thread
        for i in range(self.thread_num):
            hThread = threading.Thread(target=self._thread)
            hThread.start()
            self.thread_pool.append(hThread)
Example no. 23
    def __get_time(self, namestr):
        timestr = self.time.find(namestr).attrib['val']
        try:
            fmt = self.time.find(namestr).attrib['format']
        except (AttributeError, KeyError):
            log.warn('No date format given, returning the raw value')
            return timestr

        if fmt.lower() == 'flickr':
            return self.convert_flickrdate_to_unix(timestr)
        elif fmt.lower() == 'unix':
            return timestr
        else:
            log.warn('Unknown date format "%s", returning the raw value' % fmt)
            return timestr
Example no. 24
    def _dump(self, val, dumper):
        """
        If the val is already a string and the dumper is None
        the val will be remain the same.
        If the val is not a string, and the dumper is None, it will
        try to use str() to convert to the val to string, and give a warning
        """
        if dumper is not None:
            return dumper(val)
        if type(val) is str:
            return val
        val_str = str(val)
        log.warn('\033[0;32mWARNING:\033[0m %s is not a string, using '
                 'str() to convert it' % val_str)
        return val_str
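The three paths through _dump, sketched with a hypothetical instance db of the class above:

import json

db._dump('abc', None)            # already a string: returned unchanged
db._dump({'k': 1}, json.dumps)   # explicit dumper: returns '{"k": 1}'
db._dump(42, None)               # no dumper: warns and returns str(42) == '42'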
Example no. 25
def normalize_image(img):
    """
    Normalize an image such that intensity ~ N(0, 1)
    :param img: input image
    :return: normalized image
    """
    if img.max() <= 1:
        glog.warn(Colors.yellow('intensity value already in [0, 1]?'))
        # Without this assignment, the loop below raised a NameError
        # for inputs that are already in [0, 1].
        normalized_image = img.astype('f')
    else:
        normalized_image = img.astype('f') / 255

    for i in range(3):
        normalized_image[..., i] = (normalized_image[..., i] -
                                    stats['mean'][i]) / stats['std'][i]
    return normalized_image
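A usage sketch, assuming the module-level stats dict referenced above holds per-channel mean/std; the values below are a common ImageNet-style assumption, not taken from this code.

import numpy as np

# stats = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]}
img = (np.random.rand(64, 64, 3) * 255).astype('uint8')
out = normalize_image(img)   # each channel scaled to roughly N(0, 1)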
Example no. 26
    def reward_mask(self, mask, factor, right_shift_one, right_shift_two,
                    stretch_one, stretch_two):
        reward = 0
        height, width = mask.shape
        tot_num_pixels = height * width
        fov_score = (cv2.sumElems(mask)[0] / 255) / tot_num_pixels
        log.warn("FOV Score: {}".format(fov_score))

        reward = factor * (np.tanh(
            (1 / stretch_one) *
            (2 * np.pi * fov_score - right_shift_one * np.pi)) + np.tanh(
                (1 / stretch_two) *
                (2 * np.pi * fov_score - right_shift_two * np.pi)))
        log.warn("Reward for FOV: {}".format(reward))

        return reward, fov_score
Example no. 27
def _gmm1d(data_list, n=1, num=100, start=None, end=None):
    """
    The backend for fitting the 1d gmm model without display
    The return value is a list of a list
    which contains the points of each gaussian model
    Param:
        data_list: The list or np.array contains all 1-dim data
        n:      The component number of the gmm model
        num:    The number of points which used to display the curve
        start:  The start x label for drawing the gmm curve
        end:    The end x label for drawing the gmm curve
    """

    data_list = np.array(data_list)
    length = data_list.flatten().shape[0]
    # Init the gmm
    gmm = GaussianMixture(n_components=n)
    gmm.fit(np.reshape(data_list, (length, 1)))

    # Check if success
    if gmm.converged_:
        log.info('Fit \033[01;32mSUCCESSFUL\033[0m')
    else:
        log.warn('Fit \033[01;31mFAILED\033[0m')

    if start is None:
        start = data_list.min()
    if end is None:
        end = data_list.max()

    x_list = np.arange(start, end, (end - start) / float(num))

    gauss_mixt = \
        np.array([p * norm.pdf(x_list, mu, sd)
                 for mu, sd, p in zip(gmm.means_.flatten(),
                                      np.sqrt(gmm.covariances_.flatten()),
                                      gmm.weights_)])

    # Display the distribution info
    for mu, sd, p in zip(gmm.means_.flatten(),
                         np.sqrt(gmm.covariances_.flatten()),
                         gmm.weights_):
        log.info('Model Weight: \033[01;31m%f\033[0m, \
mean: \033[01;32m%f\033[0m, std: \033[01;33m%f\033[0m' % (p, mu, sd))

    return x_list, gauss_mixt
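A usage sketch on synthetic bimodal data; plotting the returned per-component curves is one plausible way to consume the output.

import numpy as np
import matplotlib.pyplot as plt

data = np.concatenate([np.random.normal(10, 1, 500),
                       np.random.normal(20, 2, 500)])
x_list, gauss_mixt = _gmm1d(data, n=2)
for component in gauss_mixt:     # one weighted pdf per Gaussian component
    plt.plot(x_list, component)
plt.show()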
Example no. 28
    def _find_pts_seg(self, pts):
        # Wrap in an array so the <= comparison broadcasts; comparing a
        # plain list against a scalar raises a TypeError in Python 3.
        time_stamp = np.array([
            sum(self.time_segment[:i])
            for i in range(len(self.time_segment) + 1)
        ])
        idx_ = np.where(time_stamp <= pts)[0]

        if idx_.shape[0] > 0:
            seg = np.max(idx_)
            if seg >= self.Ns:
                if pts != time_stamp[-1]:
                    log.warn('Eval of t : geq TM. eval target = last segment')
                seg = self.Ns - 1
        else:
            log.warn('Eval of t : leq T0. eval target = 1st segment')
            seg = 0
        return seg
Example no. 29
    def __init__(self, config):
        self.config = config
        self.root = et.parse(self.config).getroot()
        try:
            self.version = float(self.root.attrib['ver'])
        except (KeyError, ValueError):
            log.warn('The xml config file does not contain a version')
            self.version = None
        # Parse the whole xml file to check whether it is intact
        self.auth = self.root.find('auth')
        if self.auth is None:
            log.error('The xml config file has no auth section')

        self.time = self.root.find('time')
        if self.time is None:
            log.error('The xml config file has no time section')

        self.fetch = self.root.find('fetch')
        if self.fetch is None:
            log.error('The xml config file has no fetch section')

        self.hitcontrol = self.root.find('hitcontrol')
        if self.hitcontrol is None:
            log.error('This xml config file has no hitcontrol section')

        self.photoscreen = self.root.find('photoscreen')
        if self.photoscreen is None:
            log.error('This xml config file has no photoscreen section')

        # Init the members of this class
        self.key = self.get_key()
        self.secret = self.get_secret()
        self.time_end = self.get_end_time()
        self.time_start = self.get_start_time()
        self.time_interval = self.get_interval()
        self.time_interval_min = self.get_interval_min()
        self.time_interval_max = self.get_interval_max()
        self.time_interval_init = self.get_interval_init()
        self.time_dynamic = self.is_dynamic_time()
        self.time_show = self.get_show_time()
        self.hit_limit = self.get_hit_limit()
        self.page_size = self.get_page_size()
        self.max_size = self.get_max_size()
        self.batch_size = self.get_batch_size()
        self.urls = self.__combine_list_to_string(self.get_urls())
Example no. 30
def open_db(lmdb_file):
    """Check if the lmdb file already exists, and ask whether delete it
    or keep add entries based on it.
    return the lmdb object
    """
    if os.path.exists(lmdb_file):
        print('%s already exists.' % lmdb_file)
        k = raw_input('Do you want to delete it?[y/n]:')
        if k == 'y' or k == 'Y':
            log.warn('Delete the %s file' % lmdb_file)
            shutil.rmtree(lmdb_file)
        elif k == 'n' or k == 'N':
            log.warn('Keep the %s file, and new entries will be added' %
                     lmdb_file)
        else:
            log.error('Wrong key input, exit the program')
            sys.exit(2)

    db = lmdb.open(lmdb_file, map_size=int(1e12))
    return db
Example no. 31
def main(argv):
    src_file = None
    dst_file = None
    config_file = None
    result_dict = {}
    help_msg = 'dataset_split_class.py -i <indexfile> -o <output> -c <config>\n\
-i <file>           The input index text file\n\
-o <file>           The output index text file\n\
-c <file>           The configure xml file'

    try:
        opts, args = getopt.getopt(argv, 'hi:c:o:')
    except getopt.GetoptError:
        print(help_msg)
        sys.exit(2)

    for opt, arg in opts:
        if opt == '-h':
            print(help_msg)
            sys.exit()
        elif opt == '-i':
            src_file = arg
        elif opt == '-o':
            dst_file = arg
        elif opt == '-c':
            config_file = arg
        else:
            print(help_msg)
            sys.exit(2)

    if src_file is None or dst_file is None or config_file is None:
        print(help_msg)
        sys.exit(2)

    # Check the config file
    log.info('Parsing configure file: %s' % config_file)
    config = myxml.parse_classifier_xml(config_file)
    if config is None:
        log.fatal('Parse configure file %s error' % config_file)
        sys.exit(2)
    # Build the per-class counter only after the None check;
    # dict.fromkeys(None) would raise before the check ran.
    result_dict = dict.fromkeys(config)
    # Check the src_file
    log.info('Opening %s' % src_file)
    try:
        src_fp = open(src_file, 'r')
    except IOError:
        log.fatal('Can not open %s' % src_file)
        sys.exit(2)
    # Open the dst file
    log.info('Opening %s' % dst_file)
    try:
        dst_fp = open(dst_file, 'w')
    except IOError:
        log.fatal('Can not open %s' % dst_file)
        sys.exit(2)

    # loop the src_file
    for line in src_fp.readlines():
        element = line.split(' ')
        if len(element) != 2:
            log.warn('\033[31mWARNING:\033[0m Malformed line (expected 2 '
                     'fields): %s' % line)
            continue
        focal_length = int(element[-1])
        image_path = element[0]
        # Get the label
        label = get_class(config, focal_length)
        if label is None:
            log.warn('\033[32mSKIP:\033[0m %s' % line)
            continue
        if result_dict[label] is None:
            result_dict[label] = 1
        else:
            result_dict[label] += 1
        # Write the new file
        dst_fp.write(image_path + ' %d\n' % label)

    src_fp.close()
    dst_fp.close()
    log.info('Final result: %s' % str(result_dict))
    log.info('Finished')
Example no. 32
 def test_warn(self):
     log.warn('test')
Example no. 33
import glog as log

log.info("It works.")
log.warn("Something not ideal")
log.error("Something went wrong")
log.fatal("AAAAAAAAAAAAAAA!")

log.check(False)
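The check helpers follow the same pattern: they log and raise on failure instead of returning False. A short sketch, continuing from the import above; the message= keyword is the same one used in the init_config example earlier.

log.check_eq(2 + 2, 4)                           # passes silently
log.check(0 <= 5, message='value out of range')  # optional failure message
# a failed check, like log.check(False) above, logs the error and raises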
Example no. 34
def try_log():
    log.debug('2333 happy debugging....')
    log.info('it works')
    log.warn('something not ideal')
    log.error('something went wrong')
    log.fatal('AAAAAAAAAAA!')