Example #1
    def normalDownload(url):
        '''
        Randomly select a User-Agent to build the request headers.
        Note: this could be extended to download through proxy IPs.
        '''

        count = 0  # 重试次数
        while count <= HtmlDownload_RETRY_TIME:
            try:
                r = requests.get(url=url,
                                 headers=HtmlDownload_get_header(),
                                 timeout=HtmlDownload_TIMEOUT)
                r.encoding = chardet.detect(r.content)['encoding']
            except Exception as e:
                count += 1
                if count > HtmlDownload_RETRY_TIME:
                    logger.debug("download " + url + " failed: {}".format(e),
                                 exc_info=True)
                    logger.warning("download " + url +
                                   " failed : {}".format(e))
                    return -1
                continue

            if (not r.ok):
                count += 1
            else:
                logger.info("download " + url + " successful")
                return r.text
        logger.warning("download " + url + " failed")
        return -1
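The snippet above relies on a HtmlDownload_get_header() helper and two retry/timeout constants that are not shown. A minimal sketch of what they might look like, with made-up values and User-Agent strings:

import random

# Hypothetical values for the constants used above.
HtmlDownload_RETRY_TIME = 3   # maximum number of retries
HtmlDownload_TIMEOUT = 10     # request timeout in seconds

_USER_AGENTS = [
    "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36",
    "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/605.1.15",
    "Mozilla/5.0 (X11; Linux x86_64; rv:115.0) Gecko/20100101 Firefox/115.0",
]

def HtmlDownload_get_header():
    """Pick a random User-Agent and build the request headers."""
    return {"User-Agent": random.choice(_USER_AGENTS)}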
Example #2
def load_numpy_model(model_path, numpy_var_list={}):
    '''
        @brief: load numpy variables from npy files. The variables could be
            from baseline or from ob_normalizer
        @output:
            Note that this function only returns the values; it does not
            assign them (whereas the tf variables are loaded in place at
            the same time).
    '''
    is_file_valid(model_path)
    logger.info('LOADING numpy variables')

    output_save_list = np.load(model_path, encoding='latin1').item()
    numpy_name_list = [key for key, val in numpy_var_list.items()]

    # get the weights one by one
    for name, val in output_save_list.items():
        if name in numpy_name_list:
            logger.info(
                '\t\tloading numpy pretrained parameters {}'.format(name))
            numpy_name_list.remove(name)  # just for sanity check
            numpy_var_list[name] = val
        else:
            logger.warning(
                '\t\t**** Parameters Not Exist **** {}'.format(name))

    if len(numpy_name_list) > 0:
        logger.warning(
            'Some parameters were not loaded from the checkpoint: {}'.format(
                numpy_name_list))
    return numpy_var_list
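A hedged usage sketch for load_numpy_model, assuming is_file_valid exists; the variable names and file name below are illustrative only (note that on NumPy >= 1.16.3 the np.load call above also needs allow_pickle=True):

import numpy as np

# Write a tiny checkpoint, then read selected variables back through the helper.
np.save('tiny_checkpoint.npy', {'policy/w': np.ones((4, 2)), 'policy/b': np.zeros(2)})

wanted = {'policy/w': None, 'policy/b': None, 'policy/extra': None}
loaded = load_numpy_model('tiny_checkpoint.npy', numpy_var_list=wanted)
# 'policy/extra' is absent from the file, so it stays None and the
# "not loaded from the checkpoint" warning above fires.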
Example #3
def stage_files(file_locations, soap_envelope, ignore_errors=True):
    """Request scene bundles be moved to fastest cache available."""
    try:
        logger.info('Start staging...')
        url = "https://dds.cr.usgs.gov/HSMServices/HSMServices?wsdl"
        headers = {'content-type': 'text/xml'}

        files = ''.join(
            ['<files>{}</files>'.format(x) for x in file_locations])
        soap_envelope = soap_envelope.format(FILES_PLACE_HOLDER=files,
                                             WAIT="false")

        logger.debug('SOAP Envelope: %s', soap_envelope)
        request_object = urllib2.Request(url, soap_envelope, headers)

        response = urllib2.urlopen(request_object)

        html_string = response.read()
        logger.info('Stage response: %s', html_string)

    except Exception as e:
        if not ignore_errors:
            logger.exception('Error staging files!')
            raise
        else:
            logger.warning('Error staging files: %s.  Continue anyway.', e)

    else:
        logger.info('Staging succeeded...')
Example #4
 def login(
     cls,
     demo: bool = True,
     secret_path: str = None,
     id_: str = None,
     pw: str = None,
     cert_pw: str = None,
 ):
     """
     Log in to the Ebest securities server
     Args:
         demo (bool) <True>: True -> demo server / False -> real server
         secret_path (str): path in which the secret file exists
         id_ (str): id
         pw (str): password
         cert_pw (str): password for certificate
     """
     logger.info("connection check")
     if not cls.is_connected():
         logger.warning("not yet connected")
         cls.connect(demo=demo)
     elif cls.is_login():
         if cls.is_demo() is not demo:
             logger.warning(f"alredy connected but server is different")
             cls.connect(demo=demo)
         else:
             logger.info("already login")
             post_quit()
             return
     logger.info("login attempt")
     secrets = get_secrets(secret_path)
     id_ = id_ or secrets["ID"]
     pw = pw or (secrets["DEMO_PW"] if demo else secrets["PW"])
     cert_pw = cert_pw or secrets["CERT_PW"]
     cls.session.Login(id_, pw, cert_pw, 0, False)
Example #5
    def tokenize(self, tokenize_func, label_func):
        """Tokenize Word and assign token labels.

        Args:
            tokenize_func: function taking word text and returning list
                of token texts.
            label_func: function taking the word and its token texts
                and returning a list of token labels.
        """
        if self.tokens is not None:
            logger.warning('Word tokenized repeatedly')
        token_texts = tokenize_func(self.text)
        if len(token_texts) == 0:
            unk_token = '[UNK]'  # TODO
            logger.warning(f'Word "{self.text}" tokenized to {token_texts}, '
                           f'replacing with {unk_token}')
            token_texts = [unk_token]
        token_labels = label_func(self, token_texts)
        # TODO parameterize use of segment IDs
        # segment_ids = [0] + [1] * (len(token_texts)-1)
        segment_ids = [0] * len(token_texts)
        assert len(token_labels) == len(token_texts)
        assert len(segment_ids) == len(token_texts)
        self.tokens = [
            Token(t, label=l, word=self, segment_id=s)
            for t, l, s in zip(token_texts, token_labels, segment_ids)
        ]
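The docstring above only describes the two callables; below is a minimal sketch of plausible (made-up) implementations and how they would be passed in, assuming Word exposes text and label attributes:

def simple_tokenize(text):
    # naive "subword" split purely for illustration
    return text.split('-') if '-' in text else [text]

def first_token_label(word, token_texts):
    # give the word's label to the first token and a filler label to the rest
    return [word.label] + ['X'] * (len(token_texts) - 1)

# word.tokenize(simple_tokenize, first_token_label)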
Example #6
def _get_para_list(xml_soup, node_type_allowed):
    '''
        @brief:
            for each type in the node_type_allowed, we find the attributes that
            shows up in the xml
            below is the node parameter info list:

            @root (size 0):
                More often the case, the root node is the domain root, as
                there is the 2d/3d information in it.

            @body (max size: 3):
                @pos: 3

            @geom (max size: 9):
                @fromto: 6
                @size: 1
                @type: 2

            @joint (max size: 11):
                @armature: 1
                @axis: 3
                @damping: 1
                @pos: 3
                @stiffness: 1  # important
                @range: 2
    '''
    # step 1: get the available parameter list for each node
    para_list = {node_type: [] for node_type in node_type_allowed}
    mj_soup = xml_soup.find('worldbody').find('body')
    for node_type in node_type_allowed:
        # search the node with type 'node_type'
        node_list = mj_soup.find_all(node_type)  # all the nodes
        for i_node in node_list:
            # deal with each node
            for key in i_node.attrs:
                # deal with each attributes
                if key not in para_list[node_type] and \
                        key in PARAMETERS_DEFAULT_DICT[node_type]:
                    para_list[node_type].append(key)

    # step 2: get default parameter settings
    default_dict = PARAMETERS_DEFAULT_DICT
    default_soup = xml_soup.find('default')
    if default_soup is not None:
        for node_type, para_type_list in para_list.iteritems():
            # find the default str if possible
            type_soup = default_soup.find(node_type)
            if type_soup is not None:
                for para_type in para_type_list:
                    if para_type in type_soup.attrs:
                        default_dict[node_type][para_type] = \
                            type_soup[para_type]
            else:
                logger.info('No default settings available for type {}'.format(
                    node_type))
    else:
        logger.warning('No default settings available for this xml!')

    return para_list, default_dict
Example #7
def parseMessage(msgData, authors):
    """Parse the message contained in msgData.

     authors should be a dict mapping the IDs found in msgData to preferred
     aliases. If an ID is not present, the ID itself is used as the alias for
     all subsequent messages.

    :return: A string representing the parsed message, or None if the value for
    a specific key was not found
    """
    try:
        localTimestamp = time.localtime(msgData["timestamp"] / 1000)
        dateAndTime = time.strftime("%Y.%m.%d %H:%M:%S", localTimestamp)
        body = msgData["body"].replace("\n", " ")
        authorId = msgData["author"].split(":")[1]
        if authorId not in authors:
            logger.warning(
                "Missing value for author ID {}. Using directly the ID for all successive messages"
                .format(authorId))
            authors[authorId] = str(authorId)
        author = authors[authorId]
        message = str(dateAndTime) + " " + author + " " + body
        return message
    except KeyError:
        logger.error("Parsing message. KeyError")
        logger.error(msgData)
        return None
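For reference, a made-up msgData payload matching the keys accessed above, and a matching call:

example_msg = {
    "timestamp": 1514764800000,        # milliseconds since the epoch
    "body": "hello\nworld",
    "author": "fbid:1234567890",       # parseMessage keeps the part after ':'
}
# parseMessage(example_msg, {"1234567890": "Alice"})
# -> "2018.01.01 00:00:00 Alice hello world"  (hour/minute depend on the local timezone)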
Example #8
 def handle_ready(self, worker_id, result=None):
     """Handle worker ready message.
     
     If the worker isn't already in the worker pool, add 
     it implicitly and print a warning.
     
     If the worker is not initialized, keep the worker in 
     busy state and deal the initialization task.
     
     Else, if the worker isn't already in available state,
     make it available."""
     if not worker_id in self.worker_pool:
         logger.warning(
             'Implicitly creating worker with ID {}'.format(worker_id))
         self.worker_pool.create(worker_id=worker_id)
     # Ready message will be received multiple times if the worker is
     # neither terminated nor assigned a new task.
     worker = self.worker_pool.get(worker_id)
     if not self.worker_pool.is_free(worker_id):
         self.worker_pool.free(worker_id)
         if worker.task_id is not None:
             if worker.task_id == 'init':
                 self.finalize_worker(worker, result)
             else:
                 self.task_queue.finish(worker.task_id, result)
Example #9
 def fit(self):
     '''
         @brief:
             This is a dummy function kept for program compatibility.
             For the numpy baseline, @fit is called to perform the update.
     '''
     logger.warning('This is a dummy function!')
Example #10
    def __init__(self, env_name, n_action_repeat,
                 display, data_format='NCHW', return_cumulated_reward=False,
                 is_training=True):
        '''
            @brief: init the environment
            @input:
                @env_name: the name of the game
                @n_action_repeat: the number of times the same action is executed
                @data_format: 'NHWC' or 'NCHW', useful when defining the network
        '''
        self.name = env_name
        # TODO: action repeat is not useful
        self.n_action_repeat = 1
        self.env = gym.make(env_name)
        self.temp_env = None
        self.data_format = data_format
        self.display = display
        self.display_backup = self.display
        self.return_cumulated_reward = return_cumulated_reward
        self.is_train = is_training

        if self.data_format != 'NCHW':
            logger.warning('You sure you want to use NWHC format?')

        assert n_action_repeat >= 1, \
            logger.error('Action must be at least used once')
        logger.info('Init game environments {}'.format(self.name))
Example #11
 def saveUser(self, user):
     assert type(user) == User, "wrong type"
     sql = "insert into User(userName,password,permission,wechatId,wechatName,registerTime,phoneNumber,emailAddress) values(%s,%s,%s,%s,%s,%s,%s,%s)"
     value = [
         user.userName,
         user.getPassword(), user.permission, user.wechatId,
         user.wechatName, user.registerTime, user.phoneNumber,
         user.emailAddress
     ]
     cursor = self.db.cursor()
     try:
         result = cursor.execute(sql, value)
         self.db.commit()
         cursor.close()
         return result
     except Exception as e:
         self.db.rollback()
         cursor.close()
         if e.args[0] == 1062:
             logger.warning(e.args[1])
             return 0
         logger.error("Failed:" + str(sql), exc_info=True)
         return -1
Example #12
    def start(self):
        while True:
            try:
                # 1. Capture images from all cameras
                logger.debug("Capturing Images")
                images = self.get_images()
                # 2. Send them to the remote server
                logger.debug("Submitting Images")
                self.post_images(images)
            except:
                logger.warning("Unable to retrieve and send images")

            # Wait
            time.sleep(PERIOD)

    def get_images(self):
        images = []
        for cam in self.cameras:
            # Get Image from camera
            img = cam.getImage()
            images.append(img)
        return images

    def post_images(self, images):
        #Todo: Saving the images to disk until webserver is up and running
        for i in xrange(self.n_cameras):
            img = images[i]
            img.show()
            img.save("images/{}-{}.jpg".format(i, time.time()))
Example #13
def parseMessagesFromFile(filePath, limit=0, startDate=None, endDate=None):
    messages = []
    senders = set([])
    if startDate:
        startDate = datetime.strptime(startDate, Message.DATE_FORMAT)
    if endDate:
        endDate = datetime.strptime(endDate, Message.DATE_FORMAT)
    try:
        with open(filePath, 'r', encoding="utf8") as f:
            for line in f:
                date, time, sender, text = line.split(' ', 3)
                if startDate or endDate:
                    thisDate = datetime.strptime(date, Message.DATE_FORMAT)
                    if (not startDate or thisDate >= startDate) and (
                            not endDate or thisDate <= endDate):
                        messages.append(
                            Message(date, time, sender, text.strip()))
                else:
                    messages.append(Message(date, time, sender, text.strip()))
                senders.add(sender)
                if limit != 0 and len(messages) >= limit:
                    break
    except IOError:
        logger.warning("No such file: " + filePath)
    return messages, senders
Example #14
def image_meta(url, url_idx, web):
    r = web.get(url)
    if not r:
        logger.warning("Could not download image")
        return None
    buf = r.content

    try:
        f = BytesIO(buf)
        im = Image.open(f)

        meta = {
            "url": url_idx,
            "size": len(buf),
            "width": im.width,
            "height": im.height,
            "sha1": hashlib.sha1(buf).hexdigest(),
            "md5": hashlib.md5(buf).hexdigest(),
            "crc32": format(zlib.crc32(buf), "x"),
            "dhash": b64hash(imagehash.dhash(im, hash_size=12), 18),
            "phash": b64hash(imagehash.phash(im, hash_size=12), 18),
            "ahash": b64hash(imagehash.average_hash(im, hash_size=12), 18),
            "whash": b64hash(imagehash.whash(im, hash_size=8), 8),
        }
    except Exception as e:
        logger.warning("exception during image post processing: " + str(e))
        return None

    del im, r, buf

    return meta
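b64hash is not shown above. One hedged guess at what it could do, packing the boolean hash matrix that imagehash returns into a short urlsafe base64 string; the real project's helper may differ:

import base64
import numpy as np

def b64hash(image_hash, length):
    """Hypothetical helper: flatten the hash bits, pack them into bytes and
    keep the first `length` characters of the urlsafe base64 encoding."""
    bits = np.packbits(image_hash.hash.flatten())
    return base64.urlsafe_b64encode(bits.tobytes()).decode('ascii')[:length]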
Example #15
    def __init__(self, config, stage='train'):
        '''
            @brief:
                for the input, we have the noise input, the img input, the
                text representation input
        '''

        assert stage in ['train', 'test'], \
            logger.error('Invalid training stage')
        logger.warning('test mode is not supported currently')
        self.config = config
        self.batch_size = config.TRAIN.batch_size
        self.stage = stage
        self.train = (self.stage == 'train')

        # define the placeholders
        self.noise_input = tf.placeholder(
            tf.float32, [self.batch_size, self.config.z_dimension])
        self.real_img = tf.placeholder(tf.float32,
                                       [self.batch_size, 64, 64, 3])
        self.real_sen_rep = tf.placeholder(tf.float32, [self.batch_size, 1024])
        self.wrong_sen_rep = tf.placeholder(tf.float32,
                                            [self.batch_size, 1024])
        self.step = 0

        return
Example #16
def direct_clip(stacking, band_name, clip_extents, tile_id, rename, workdir):
    """Clip datatypes which require no special processing."""
    logger.info('     Start processing for band: %s', band_name)
    mosaic_filename = os.path.join(workdir, tile_id,
                                   tile_id + '_' + rename + '.tif')

    if os.path.exists(mosaic_filename):
        logger.warning("Skip previously generated result %s", mosaic_filename)
        return mosaic_filename

    warp_cmd = ('gdalwarp -te {extents}'
                ' -co "compress=deflate" -co "zlevel=9"'
                ' -co "tiled=yes" -co "predictor=2"').format(
                    extents=clip_extents)

    for stack in reversed(stacking):
        scene_name = util.ffind(workdir, stack['LANDSAT_PRODUCT_ID'],
                                '*' + band_name + '.tif')
        warp_cmd += ' ' + scene_name
    warp_cmd += ' ' + mosaic_filename
    util.execute_cmd(warp_cmd)

    logger.info('    End processing for %s as %s ', band_name, mosaic_filename)
    if not os.path.exists(mosaic_filename):
        logger.error('Processing failed to generate desired output: %s',
                     mosaic_filename)
    return mosaic_filename
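Several of these GDAL examples call util.execute_cmd and, in Example #36 below, read results['output']. A hedged sketch of such a helper using subprocess; the real module's return contract may differ:

import logging
import subprocess

logger = logging.getLogger(__name__)

def execute_cmd(cmd):
    """Run a shell command and return its status and combined output."""
    proc = subprocess.run(cmd, shell=True, capture_output=True, text=True)
    if proc.returncode != 0:
        logger.warning('Command failed (%d): %s', proc.returncode, cmd)
    return {'command': cmd, 'status': proc.returncode,
            'output': proc.stdout + proc.stderr}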
Example #17
def get_or_create_unconverted_source_url(youtube_id):
    matching_keys = list(unconverted_bucket.list(youtube_id))
    matching_key = None

    if len(matching_keys) > 0:
        if len(matching_keys) > 1:
            logger.warning("More than 1 matching unconverted video URL found for video {0}".format(youtube_id))
        matching_key = matching_keys[0]
    else:
        logger.info("Unconverted video not available on s3 yet, downloading from youtube to create it.")

        video_path = youtube.download(youtube_id)
        logger.info("Downloaded video to {0}".format(video_path))

        assert(video_path)

        video_extension = splitext(video_path)[1]
        assert video_extension[0] == "."
        video_extension = video_extension[1:]
        if video_extension not in ["flv", "mp4"]:
            logger.warning("Unrecognized video extension {0} when downloading video {1} from YouTube".format(video_extension, youtube_id))

        matching_key = Key(unconverted_bucket, "{0}/{0}.{1}".format(youtube_id, video_extension))
        matching_key.set_contents_from_filename(video_path)

        os.remove(video_path)
        logger.info("Deleted {0}".format(video_path))

    return "s3://{0}/{1}".format(unconverted_bucket.name, matching_key.name)
Example #18
def parseMessage(msgData, authors):
    """Parse the message contained in msgData.

     authors should be a dict mapping the IDs found in msgData to preferred
     aliases. If an ID is not present, the ID itself is used as the alias for
     all subsequent messages.

    :return: A string representing the parsed message, or None if the value for
    a specific key was not found
    """
    try:
        localTimestamp = time.localtime(msgData["timestamp"]/1000)
        dateAndTime = time.strftime("%Y.%m.%d %H:%M:%S", localTimestamp)
        body = msgData["body"].replace("\n", " ")
        authorId = msgData["author"].split(":")[1]
        if authorId not in authors:
            logger.warning("Missing value for author ID {}. Using directly the ID for all successive messages".format(authorId))
            authors[authorId] = str(authorId)
        author = authors[authorId]
        message = str(dateAndTime) + " " + author + " " + body
        return message
    except KeyError:
        logger.error("Parsing message. KeyError")
        logger.error(msgData)
        return None
Example #19
def main():
    parser = optparse.OptionParser()

    parser.add_option("-n", "--no-log",
        action="store_true", dest="nolog",
        help="Don't store log file", default=False)

    parser.add_option("-m", "--max",
        action="store", dest="max", type="int",
        help="Maximum number of videos to process", default=1)

    parser.add_option("-d", "--dryrun",
        action="store_true", dest="dryrun",
        help="Don't start new zencoder jobs or upload to s3",
        default=False)

    options, args = parser.parse_args()

    setup_logging(options)

    # Grab a lock that times out after 2 days
    with filelock.FileLock("export.lock", timeout=2):
        (success, error_ids) = YouTubeExporter.convert_missing_downloads(
            options.max, options.dryrun)

    if error_ids:
        msg = ('Skipped %d youtube-ids due to errors:\n%s\n'
               % (len(error_ids), '\n'.join(error_ids)))
        logger.warning(msg)
        # Make this part of the stdout output as well, so it gets passed
        # from cron to our email.
        print msg
    return (success, len(error_ids))
Example #20
def download_img(url, title, idx):
    headers = {
        "Accept":
        "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8",
        "Accept-Encoding":
        "gzip, deflate",
        "Accept-Language":
        "en-US,en;q=0.9,zh-CN;q=0.8,zh;q=0.7",
        "Cache-Control":
        "max-age=0",
        "Connection":
        "keep-alive",
        "Upgrade-Insecure-Requests":
        "1",
        "User-Agent":
        "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36"
    }
    img_path = g_path("img", "{}{}{}".format(title, str(idx),
                                             get_extension(url)))
    if img_path.exists():
        return
    try:
        img = requests.get(url, headers=headers)
    except requests.exceptions.ConnectionError as err:
        logger.warning("conn_error:{}:{}".format(title, url))
        return
    if img.status_code != 200:
        logger.warning("wrong status {}:{}".format(title, url))
    else:
        with open(str(img_path), 'wb') as file:
            file.write(img.content)
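g_path and get_extension are external helpers. A hedged sketch of get_extension, which the snippet uses to pick a file suffix from the image URL (the ".jpg" fallback is an assumption):

import os
from urllib.parse import urlparse

def get_extension(url):
    """Return the file extension (with dot) from an image URL, ignoring the query string."""
    ext = os.path.splitext(urlparse(url).path)[1]
    return ext or ".jpg"  # assumed fallback when the URL has no extension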
Example #21
    def train(self, test_dataset=None, saved_model=None):
        total_loss = 0
        best = np.Inf
        if saved_model:
            self.model.load_state_dict(torch.load(saved_model))
        for i in range(self.epoch):
            self.model.train()
            self.model.zero_grad()
            for idx_b, batch in enumerate(self.get_batch()):
                src, trg = batch
                output = self.model(src,
                                    trg,
                                    teacher_forcing_ratio=self.args.tr)
                loss = F.nll_loss(output[1:].view(-1,
                                                  self.dataset.vocab.size()),
                                  trg[1:].contiguous().view(-1),
                                  ignore_index=PAD)
                loss.backward()
                torch.nn.utils.clip_grad_norm(self.model.parameters(),
                                              self.grad_clip)
                self.optimizer.step()
                total_loss += loss.item()

                if idx_b % 10 == 0 and idx_b != 0:
                    total_loss = total_loss / 10
                    self.writer.add_scalar('train_loss_epoch_{}'.format(i),
                                           total_loss, idx_b)
                    total_loss = 0

            if test_dataset is not None:
                start = time.time()
                eval_loss, all_acc, num_sentence = self.evaluate(test_dataset)
                elapsed = time.time() - start
                logger.warning(
                    '-' * 10 + 'test set' + '-' * 88 + '\n' +
                    '| epoch {} | {:5d} / {:5d} dataset size | lr {:05.5f} | time {:5.2f} | '
                    'loss {:5.2f} | all_acc {:5.2f}'.format(
                        i, len(test_dataset), num_sentence, self.lr, elapsed /
                        60, eval_loss, all_acc) + '\n' + '-' * 98)

                if eval_loss < best:
                    best = eval_loss
                    torch.save(
                        self.model.state_dict(),
                        self.args.model + '/' + self.args.task_name +
                        '_loss_' + "%0.2f" % eval_loss + '_epoch_' + str(i) +
                        '_batch_' + str(self.batch) + '.pt')
                else:
                    self.adjust_learning_rate()

            if self.args.test_train:
                start = time.time()
                eval_loss, all_acc, num_sentence = self.evaluate()
                elapsed = time.time() - start
                logger.warning(
                    '-' * 10 + 'train set' + '-' * 88 + '\n' +
                    '| epoch {} | {:5d} / {:5d} dataset size | lr {:05.5f} | time {:5.2f} | '
                    'loss {:5.2f} | all_acc {:5.2f}'.format(
                        i, len(self.dataset), num_sentence, self.lr, elapsed /
                        60, eval_loss, all_acc) + '\n' + '-' * 98)
Example #22
 def post_data(self, data):
     timestamp = datetime.now().isoformat()
     post_data = POST_DATA_FORMAT.format(self.sensor.sensor_id, data, timestamp)
     http = httplib.HTTPConnection(Config.get("WEBSERVER_HOST_NAME"))
     http.request("POST", Config.get("WEBSERVER_POST_DATA_URI"), post_data)
     response = http.getresponse()
     if response.status != 200:
         logger.warning("Error posting images to webserver. " + response.reason)
Example #23
 def parse_thread(r):
     try:
         j = json.loads(r.content.decode('utf-8', 'ignore'))
     except JSONDecodeError:
         logger.warning("JSONDecodeError for %s:" % (r.url, ))
         logger.warning(r.text)
         return []
     return j["posts"]
Example #24
 def handle_deal_task(self, download, task, upload, cleanup):
     self.commands = download + task + upload + cleanup
     try:
         self.perform_commands()
     except Exception as e:
         logger.warning('Worker exception: ' + str(e))
     self.commands = None
     self.do(self.ready)
Example #25
 def finalize_worker(self, worker, result):
     if result['success']:
         logger.info('Worker {} initialized successfully'.format(worker.id))
         worker.initialized = True
     else:
         logger.warning(
             'Failed to initialize worker {}. Stderr: {}\nCommand: {}'.
             format(worker.id, result['stderr'], result['command']))
Example #26
def getSetFromFile(filePath):
    theSet = set([])
    try:
        with open(filePath, 'r') as f:
            theSet = {line.strip() for line in f}
    except IOError:
        logger.warning("No such file " + filePath)
    return theSet
Example #27
def getSetFromFile(filePath):
    theSet = set([])
    try:
        with open(filePath, 'r') as f:
            theSet = {line.strip() for line in f}
    except IOError:
        logger.warning("No such file " + filePath)
    return theSet
Example #28
 def move(self, worker_id, source, destination):
     try:
         source.pop(source.index(worker_id))
     except ValueError:
         logger.warning(
             'Attempting to remove a worker from a queue that does not contain it.'
         )
         return
     destination.append(worker_id)
Example #29
def ERROR(code, appcode, s):
    l = "%s %s %s" % (code, appcode, s)
    for ke in known_errors.iter('Error'):
        if(ke.attrib['ErrorCode'] == code and
           ke.attrib['AppCode'] == appcode):
            logger.warning(l)
            logger.warning("  >> %s",ke.attrib['Description'])
            return
    logger.error(l)
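known_errors is defined elsewhere; from the attribute names read above it looks like a parsed XML error catalogue. A hedged sketch with made-up entries:

import xml.etree.ElementTree as ET

known_errors = ET.fromstring(
    '<Errors>'
    '<Error ErrorCode="429" AppCode="12" Description="Rate limited, retry later"/>'
    '<Error ErrorCode="500" AppCode="3" Description="Temporary backend failure"/>'
    '</Errors>'
)
# ERROR("429", "12", "throttled by upstream")  -> logged as a warning with its description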
Example #30
def calc_nodata_9999_lineage(stacking, band_name, clip_extents, tile_id,
                             rename, workdir):
    """Clip scenes which have data outside the lineage, apply -9999 fill."""
    logger.info('     Start processing for band: %s', band_name)

    mosaic_filename = os.path.join(workdir, tile_id,
                                   tile_id + '_' + rename + '.tif')

    if os.path.exists(mosaic_filename):
        logger.warning("Skip previously generated result %s", mosaic_filename)
        return mosaic_filename

    temp_clipped_names = list()
    temp_masked_names = list()
    for level, stack in reversed(list(enumerate(stacking, start=1))):
        scene_name = util.ffind(workdir, stack['LANDSAT_PRODUCT_ID'],
                                '*' + band_name + '.tif')

        temp_name1 = mosaic_filename.replace('.tif',
                                             '_temp%d' % level + '.tif')
        temp_warp_cmd = ('gdalwarp -te {extents}'
                         ' -dstnodata "-9999" -srcnodata "-9999" {0} {1}')
        util.execute_cmd(
            temp_warp_cmd.format(scene_name, temp_name1, extents=clip_extents))
        temp_clipped_names.append(temp_name1)

        lineg_name = util.ffind(workdir, tile_id, '*LINEAGEQA.tif')
        temp_name2 = mosaic_filename.replace('.tif',
                                             '_temp%dM' % level + '.tif')
        temp_calc_cmd = ('gdal_calc.py -A {0} -B {lineage} --outfile {1}'
                         ' --calc="(A*(B=={level}) + (-9999*(B!={level})))"'
                         ' --NoDataValue=-9999')
        util.execute_cmd(
            temp_calc_cmd.format(temp_name1,
                                 temp_name2,
                                 lineage=lineg_name,
                                 level=level))
        temp_masked_names.append(temp_name2)

    temp_name = mosaic_filename.replace('.tif', '_temp.tif')
    temp_warp_cmd = 'gdalwarp {} {}'.format(' '.join(temp_masked_names),
                                            temp_name)
    util.execute_cmd(temp_warp_cmd)
    util.remove(*temp_masked_names + temp_clipped_names)

    warp_cmd = (
        'gdalwarp -dstnodata "-9999" -srcnodata "-9999" -co "compress=deflate"'
        ' -co "zlevel=9" -co "tiled=yes" -co "predictor=2" {} {}')
    util.execute_cmd(warp_cmd.format(temp_name, mosaic_filename))
    util.remove(temp_name)

    logger.info('    End processing for %s as %s ', band_name, mosaic_filename)
    if not os.path.exists(mosaic_filename):
        logger.error('Processing failed to generate desired output: %s',
                     mosaic_filename)
    return mosaic_filename
Example #31
    def OnReceiveMessage(self, is_system_error, msg_code, msg):
        code = int(msg_code)
        msg = f"OnReceiveMessage: is_system_error={is_system_error}, msg_code={msg_code}, msg={msg}"

        if code < 1000:
            logger.debug(msg)
        elif 1000 <= code < 8000:
            logger.warning(msg)
        else:
            logger.fatal(msg)
Example #32
 def __getitem__(self, item):
     try:
         return self._players[int(item)]
     except (ValueError, KeyError):
         if not self._warned:
             logger.warning("Expensive lookup by player login, FIXME.")
             self._warned = True
         by_login = {p.login: p for _, p in self._players.items()}
         if item in by_login:
             return by_login[item]
Example #33
 def handle_push_tasks(self, *, tasks, merge_strategy=None):
     logger.info('Queueing tasks.')
     for task_dict in tasks:
         task = self.create_task(task_dict)
         if task.init:
             if self.init_task is not None:
                 logger.warning('Replacing init task')
             self.init_task = task
         else:
             self.task_queue.push(task, merge_strategy=merge_strategy)
Example #34
 def __getitem__(self, item):
     try:
         return self._players[int(item)]
     except (ValueError, KeyError):
         if not self._warned:
             logger.warning("Expensive lookup by player login, FIXME.")
             self._warned = True
         by_login = {p.login: p for _, p in self._players.items()}
         if item in by_login:
             return by_login[item]
Example #35
def map_output(transfer_env, i_value, added_constant, gnn_option_list):
    '''
        @brief:
            i_value could be the logstd (1, num_action), policy_output/w
            (64, num_action), policy_output/b (1, num_action)
    '''
    assert len(gnn_option_list) == 4
    i_value = np.transpose(i_value)  # make the num_action to the front
    ienv, oenv = [env + '-v1' for env in transfer_env.split('2')]
    ienv_info = mujoco_parser.parse_mujoco_graph(
        ienv,
        gnn_node_option=gnn_option_list[0],
        root_connection_option=gnn_option_list[1],
        gnn_output_option=gnn_option_list[2],
        gnn_embedding_option=gnn_option_list[3])
    oenv_info = mujoco_parser.parse_mujoco_graph(
        oenv,
        gnn_node_option=gnn_option_list[0],
        root_connection_option=gnn_option_list[1],
        gnn_output_option=gnn_option_list[2],
        gnn_embedding_option=gnn_option_list[3])
    if len(i_value.shape) > 1:
        o_value = np.zeros([len(oenv_info['output_list']), i_value.shape[1]])
    else:
        # the b matrix
        o_value = np.zeros([len(oenv_info['output_list'])])
    assert len(i_value) == len(ienv_info['output_list'])

    ienv_node_name_list = [node['name'] for node in ienv_info['tree']]
    for output_id, output_node_id in enumerate(oenv_info['output_list']):
        # get the name of the joint
        node_name = oenv_info['tree'][output_node_id]['name']
        # if the node is alreay in the input environment?
        if node_name in ienv_node_name_list:
            if ienv_node_name_list.index(node_name) not in \
                    ienv_info['output_list']:
                logger.warning('Missing joint: {}'.format(node_name))
                continue
            o_value[output_id] = i_value[ienv_info['output_list'].index(
                ienv_node_name_list.index(node_name))]
        else:
            # the name format: "@type_@name_@number", e.g.: joint_leg_1
            assert len(node_name.split('_')) == 3
            # find all the repetitive node and calculate the average
            repetitive_struct_node_list = [
                ienv_node_name_list.index(name) for name in ienv_node_name_list
                if node_name.split('_')[1] == name.split('_')[1]
            ]
            num_reptitive_nodes = float(len(repetitive_struct_node_list))
            assert len(repetitive_struct_node_list) >= 1

            for i_node_id in repetitive_struct_node_list:
                o_value[output_id] += i_value[ienv_info['output_list'].index(
                    i_node_id)] / num_reptitive_nodes
    return np.transpose(o_value) + added_constant
Example #36
def process_lineage_contributing(lineage_filename, n_contrib_scenes):
    """Check historgram for count of scenes which were not all-fill."""
    logger.info('    Start checking contributing scenes')

    info_cmd = 'gdalinfo -hist {}'
    results = util.execute_cmd(info_cmd.format(lineage_filename))
    # TODO: could potentially use this instead...
    util.remove(lineage_filename + '.aux.xml')
    count, array = geofuncs.parse_gdal_hist_output(results['output'])

    logger.info(
        'Parsing histogram from lineage file found %d'
        ' contributing scenes', count)
    if count == 0:
        logger.warning('Found all fill lineage, tile not needed!')
        raise ArdTileNotNeededException()

    # decrement pixel values in lineage file if some scenes didn't contribute
    # any pixels
    if count != n_contrib_scenes:
        delta = n_contrib_scenes - count

        # Determine whether we need decrement the pixel
        # values in the lineage file or not.
        cmd = ''
        if delta == 1:
            if array[0] == 0:
                cmd = ' --calc="A-' + str(delta) + '"'
            elif array[1] == 0 and array[2] > 0:
                cmd = ' --calc="A-(A==3)"'
        elif delta == 2:
            if array[0] == 0 and array[1] == 0:
                cmd = ' --calc="A-' + str(delta) + '"'
            elif array[0] == 0 and array[2] == 0:
                cmd = ' --calc="A-' + str(1) + '"'

        if cmd != '':
            temp_name = lineage_filename.replace('.tif', '_linTemp.tif')
            calc_cmd = ('gdal_calc.py -A {lineage} --outfile {temp} {calc}'
                        ' --type="Byte" --NoDataValue=0 --overwrite')
            util.execute_cmd(
                calc_cmd.format(lineage=lineage_filename,
                                temp=temp_name,
                                calc=cmd))

            # compress
            warp_cmd = ('gdalwarp -co "compress=deflate" -co "zlevel=9"'
                        ' -co "tiled=yes" -co "predictor=2"'
                        ' -overwrite {} {}')
            util.execute_cmd(warp_cmd.format(temp_name, lineage_filename))
            util.remove(temp_name)

    logger.info('finish updating contributing scenes')
    return count
Example #37
    def get_value(self, sensor):
        http = httplib.HTTPSConnection(API_URL)
        http.request("GET", sensor.api_uri)
        response = http.getresponse()

        if response.status != 200:
            logger.warning("Error fetching sensor value")
            raise Exception
        else:
            data = json.loads(response.read())
            return float(data["result"])
Example #38
 def _starttls():
     tlssession = SMTP()
     ext_log(tlssession.connect(getconf('smtp_server'), getconf('smtp_port')), 'startTLS connection')
     tlssession.ehlo()
     if tlssession.has_extn('STARTTLS'):
         ext_log(tlssession.starttls(context=context), 'startTLS')
         tlssession.ehlo()
         return tlssession
     else:
         logger.warning('plaintext connection')
         return tlssession
Example #39
    def get_value(self, sensor):
        http = httplib.HTTPSConnection(API_URL)
        http.request("GET", sensor.api_uri)
        response = http.getresponse()

        if response.status != 200:
            logger.warning("Error fetching sensor value")
            raise Exception
        else:
            data = json.loads(response.read())
            return float(data["result"])
Example #40
    def __init__(self):
        self.n_cameras = int(Config.get("N_CAMERAS"))

        # Initialize the cameras
        self.cameras = []
        for i in xrange(self.n_cameras):
            try:
                self.cameras.append(Camera(camera_index = i))
            except:
                logger.warning("Error opening camera #"+str(i))
        
        self.n_cameras = len(self.cameras)
Example #41
    def get_values(self):
        values = []
        data = self.psocket.recv(512)

        if len(data) != 6*4:
            logger.warning("Invalid packet format")
            raise Exception

        for i in xrange(6):
            value = struct.unpack("I", data[(i*4):(i+1)*4])
            values.append(value[0])
        return values
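The packet format implied above is six native-endian unsigned 32-bit integers (24 bytes). A small round-trip sketch of how the sending side could pack them; the readings are made up:

import struct

readings = [101, 204, 87, 0, 15, 999]
packet = struct.pack("6I", *readings)          # 6 * 4 = 24 bytes, native byte order
assert len(packet) == 6 * 4
values = [struct.unpack("I", packet[i * 4:(i + 1) * 4])[0] for i in range(6)]
assert values == readings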
Example #42
def repost_replies(account_name):
    bf = open('.blacklist_%s'%account_name,'a+')
    blacklist = bf.read().splitlines()
    bf.close()

    rp = open('.reposted_%s'%account_name,'a+')
    reposted = rp.read().splitlines()

    account = settings.ACCOUNTS.get(account_name)

    try:
        logging.info('[%s] Getting last mentions offset'%account_name)
        bot = TwitterBot(settings.CONSUMER_KEY,settings.CONSUMER_SECRET,
                         account['key'],account['secret'])
        mentions = []
        try:
            mentions = bot.api.mentions()
            logging.info('[%s] Got %d mentions'%(account_name,len(mentions)))
        except Exception,e:
            logging.error('[%s] Failed to get mentions. %s'%(account_name,e))

        for mess in reversed(mentions):
            try:
                author = mess.author.screen_name
                if str(author) in blacklist:
                    logging.debug('[%s] Author %s blacklisted. Skipping.'%(account_name,str(author)))
                    continue
                if str(mess.id) in reposted:
                    logging.debug('[%s] Message #%s already reposted. Skipping.'%(account_name,str(mess.id)))
                    continue

                message = mess.text.split(' ')
                if message[0] != '@%s'%account_name:
                    continue #not a "@reply"

                trigger = message[1]
                triggers = dict(account['triggers'])
                if trigger not in triggers:
                    logging.warning('[%s] Bad message format, sending DM to author'%account_name)
                    bot.dm(author,account['not_triggered'])
                else:
                    len_params = {'message':'','user':author}
                    mess_len = len(triggers[trigger]%len_params)
                    params = {'message':bot.trim_message(' '.join(message[2:]),mess_len),'user':author}
                    message = triggers[trigger]%params
                    logging.info('[%s] Tweeting message %s'%(account_name,message))
                    bot.tweet(message)
                rp.write('%s\n'%mess.id)
            except Exception,e:
                logging.error('%s'%e)
                continue
Example #43
    def start(self):
        while True:
            start_time = time.time()

            try:
                self.get_and_send_api()
                #self.get_and_send_udp()
            except Exception, e:
                logger.warning("Error forwarding data")

            elapsed_time = time.time() - start_time

            # Wait before we execute the next round
            time.sleep(max(0, PERIOD - elapsed_time))
Example #44
def list_converted_formats():
    """Returns a dict that maps youtube_ids (keys) to a set of available converted formats (values)"""
    converted_videos = defaultdict(set)
    legacy_video_keys = set()
    for key in converted_bucket.list(delimiter="/"):
        video_match = re_video_key_name.match(key.name)
        if video_match is None:
            if re_legacy_video_key_name.match(key.name) is not None:
                legacy_video_keys.add(key.name)
            else:
                logger.warning("Unrecognized key {0} is not in format YOUTUBE_ID.FORMAT/".format(key.name))
        else:
            converted_videos[video_match.group(1)].add(video_match.group(2))
    logger.info("{0} legacy converted videos were ignored".format(len(legacy_video_keys)))
    return converted_videos
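re_video_key_name and re_legacy_video_key_name are module-level patterns that are not shown. A hedged guess based on the warning text above ("YOUTUBE_ID.FORMAT/"); the legacy pattern in particular is an assumption:

import re

re_video_key_name = re.compile(r"^([\w-]+)\.(\w+)/$")      # e.g. "dQw4w9WgXcQ.mp4/"
re_legacy_video_key_name = re.compile(r"^[\w-]+/$")         # e.g. "dQw4w9WgXcQ/"

m = re_video_key_name.match("dQw4w9WgXcQ.mp4/")
# m.group(1) == "dQw4w9WgXcQ", m.group(2) == "mp4"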
Example #45
    def run(self):
        logger.debug("Start DataPoster: " + str(self.sensor))
        while True:
            # Wait for the next data point
            data = self.socket.recv(512)

            # Validate by castings to a float
            try:
                data = float(data)
            except:
                logger.warning("Unable to cast value:"+str(data))
                continue

            # Post the value to the webserver
            try:
                self.post_data(data)
            except:
                logger.warning("Unable to POST data")
Example #46
def parseMessagesFromFile(filePath, limit=0, startDate=None, endDate=None):
    messages = []
    senders = set([])
    if startDate:
        startDate = datetime.strptime(startDate, Message.DATE_FORMAT)
    if endDate:
        endDate = datetime.strptime(endDate, Message.DATE_FORMAT)
    try:
        with open(filePath, 'r', encoding="utf8") as f:
            for line in f:
                date, time, sender, text = line.split(' ', 3)
                if startDate or endDate:
                    thisDate = datetime.strptime(date, Message.DATE_FORMAT)
                    if (not startDate or thisDate>=startDate) and (not endDate or thisDate<=endDate):
                        messages.append(Message(date, time, sender, text.strip()))
                else:
                    messages.append(Message(date, time, sender, text.strip()))
                senders.add(sender)
                if limit != 0 and len(messages) >= limit:
                    break
    except IOError:
        logger.warning("No such file: " + filePath)
    return messages, senders
Example #47
def get_or_create_unconverted_source_url(youtube_id):
    matching_keys = list(unconverted_bucket.list(youtube_id))

    # TODO(alpert): How do these .part files get created? They're not real
    # video files and should be ignored.
    matching_keys = [key for key in matching_keys
                     if not key.name.endswith('.part')]
                     
    matching_key = None

    if matching_keys:
        if len(matching_keys) > 1:
            logger.warning("More than 1 matching unconverted video "
                           "URL found for video {0}".format(youtube_id))
        matching_key = matching_keys[0]
    else:
        logger.info("Unconverted video not available on s3 yet, "
                    "downloading from youtube to create it.")

        video_path = youtube.download(youtube_id)
        if not video_path:
            logger.warning("Error downloading video {0}".format(youtube_id))
            return
        logger.info("Downloaded video to {0}".format(video_path))

        video_extension = splitext(video_path)[1]
        assert video_extension[0] == "."
        video_extension = video_extension[1:]
        if video_extension not in ["flv", "mp4"]:
            logger.warning("Unrecognized video extension {0} when downloading "
                           "video {1} from YouTube".format(
                               video_extension, youtube_id))

        matching_key = Key(unconverted_bucket, "{0}/{0}.{1}".format(
            youtube_id, video_extension))
        matching_key.set_contents_from_filename(video_path)

        os.remove(video_path)
        logger.info("Deleted {0}".format(video_path))

    return "s3://{0}/{1}".format(unconverted_bucket.name, matching_key.name)
Example #48
		source.locked = True
		source.save()
		logger.info("Starting the update from %s, with PID:%d." % (source.name, os.getpid()))
	
	if(source.md5url and len(source.md5url) > 0):
		UpdateLog.objects.create(update=update, time=datetime.datetime.now(), logType=UpdateLog.PROGRESS, text="1 Trying to fetch md5sum, to compare with last processed file.")
		try:
			socket = urllib2.urlopen(source.md5url)
			md5 = socket.read()
			md5 = md5.strip()
			socket.close()

			logger.debug("Downloaded-MD5:'%s'" % str(md5))
			logger.debug("LastUpdate-MD5:'%s'" % str(source.lastMd5))
		except:
			logger.warning("Could not find the md5-file at %s. Proceeding to download the main update-file." % source.md5url)
			md5 = ""
	else:
		logger.info("No md5-url file found. Proceeding to download the main update-file.")
		md5 = ""
	
	
	if(len(str(md5)) == 0 or str(md5) != str(source.lastMd5)):
		UpdateLog.objects.create(update=update, time=datetime.datetime.now(), logType=UpdateLog.PROGRESS, text="2 Downloading ruleset from source.")
		logger.info("Starting to download %s" % source.url)
		storagelocation = Config.get("storage", "inputFiles")		
		filename = storagelocation + source.url.split("/")[-1]
		
		if(os.path.isdir(storagelocation) == False):
			os.makedirs(storagelocation)
Example #49
    def convert_missing_downloads(max_videos, dryrun=False):
        """Download from YouTube and use Zencoder to start converting any
        missing downloadable content into its appropriate downloadable format.
        """

        videos_converted = 0
        error_ids = []

        # With this option, videos that are missing in the S3 converted
        # bucket are converted. The API's download_urls is ignored.
        logger.info("Searching for videos that are missing from S3")
        formats_to_convert = s3.list_missing_converted_formats()
        legacy_mp4_videos = s3.list_legacy_mp4_videos()

        for youtube_id, missing_formats in formats_to_convert.iteritems():
            if videos_converted >= max_videos:
                logger.info("Stopping: max videos reached")
                break

            if "_DUP_" in youtube_id:
                logger.info(
                    ("Skipping video {0} as it has invalid DUP in youtube ID"
                     .format(youtube_id)))
                continue

            # We already know the formats are missing from S3.
            formats_to_create = missing_formats
            if (youtube_id in legacy_mp4_videos and
                    "mp4" in formats_to_create):
                if dryrun:
                    logger.info(
                        "Skipping copy of legacy content due to dryrun")
                else:
                    s3.copy_legacy_content_to_new_location(youtube_id)
                formats_to_create.remove("mp4")

            if len(formats_to_create) == 0:
                continue

            logger.info("Starting conversion of %s into formats %s" %
                        (youtube_id, ",".join(formats_to_create)))

            if dryrun:
                logger.info(
                    "Skipping downloading and sending job to zencoder due to "
                    "dryrun")
                videos_converted += 1
            else:
                s3_source_url = s3.get_or_create_unconverted_source_url(
                    youtube_id)
                if not s3_source_url:
                    logger.warning("No S3 source URL created for %s; skipping"
                                   % youtube_id)
                    error_ids.append(youtube_id)
                    continue

                try:
                    zencode.start_converting(youtube_id, s3_source_url,
                                             formats_to_create)
                    videos_converted += 1
                except Exception, why:
                    logger.error('Skipping youtube_id "%s": %s'
                                 % (youtube_id, why))
                    error_ids.append(youtube_id)
Example #50
def ext_log(command, text, warn=False):
    logresult = command
    line = '%s: status: %d ~ %s' %(text, logresult[0], logresult[-1].decode('UTF-8'))
    if warn: logger.warning(line)
    else: logger.info(line)