Code example #1
    def __init__(self, path, bands=None, dst_path=None, verbose=False):
        """
        @params
        scene - the scene ID
        bands - The band sequence for the final image. Must be a python list
        src_path - The path to the source image bundle
        dst_path - The destination path
        zipped - Set to true if the scene is in zip format and requires unzipping
        verbose - Whether to show verbose output
        """

        self.projection = {'init': 'epsg:3857'}
        self.dst_crs = {'init': u'epsg:3857'}
        self.scene = get_file(path).split('.')[0]
        self.bands = bands if isinstance(bands, list) else [4, 3, 2]

        # Landsat source path
        self.src_path = path.replace(get_file(path), '')

        # Build destination folder if it doesn't exist
        self.dst_path = dst_path if dst_path else settings.PROCESSED_IMAGE
        self.dst_path = check_create_folder(join(self.dst_path, self.scene))
        self.verbose = verbose

        # Path to the unzipped folder
        self.scene_path = join(self.src_path, self.scene)

        if self._check_if_zipped(path):
            self._unzip(join(self.src_path, get_file(path)), join(self.src_path, self.scene), self.scene)

        self.bands_path = []
        for band in self.bands:
            self.bands_path.append(join(self.scene_path, self._get_full_filename(band)))
Code example #2
def create_body(_name, _result, _date):
    """
    Creates the body of the email based on the result
    """
    global name, date

    name = _name
    result = _result
    date = _date
    
    filename = "email_user_template.json" if os.path.isfile(get_file("email_user_template.json")) else "email_template.json"

    with open(get_file(filename), 'r', encoding='utf8') as fl:
        data = json.load(fl)

    message = f(data["corpo 1"])

    result = result.lower()

    if result == "positivo": 
        message += f(data["positivo"])
    elif result == "inconclusivo": 
        message += f(data["inconclusivo"])
    elif result == "negativo":
        message += f(data["negativo"])
    else:
        raise Exception("Resultado inválido.",
                        "Resultado pode ser somente positivo, negativo ou inconclusivo")

    message += f(data["corpo 2"]) + f(data["assinatura"])
    return message
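The helper f() above is defined elsewhere in the project; from its usage it appears to interpolate the module-level name and date globals into each JSON template fragment. A minimal sketch under that assumption (the placeholder scheme is hypothetical, not confirmed by the source):

def f(template):
    # Hypothetical helper: fill {name}/{date} placeholders from the
    # module-level globals set by create_body(). Assumed behavior.
    return template.format(name=name, date=date)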
Code example #3
File: image.py Project: dongjwOU/landsat-util
    def __init__(self, path, bands=None, dst_path=None, verbose=False):
        """
        @params
        scene - the scene ID
        bands - The band sequence for the final image. Must be a python list
        src_path - The path to the source image bundle
        dst_path - The destination path
        zipped - Set to true if the scene is in zip format and requires unzipping
        verbose - Whether to show verbose output
        """

        self.projection = {'init': 'epsg:3857'}
        self.dst_crs = {'init': u'epsg:3857'}
        self.scene = get_file(path).split('.')[0]
        self.bands = bands if isinstance(bands, list) else [4, 3, 2]

        # Landsat source path
        self.src_path = path.replace(get_file(path), '')

        # Build destination folder if it doesn't exist
        self.dst_path = dst_path if dst_path else settings.PROCESSED_IMAGE
        self.dst_path = check_create_folder(join(self.dst_path, self.scene))
        self.verbose = verbose

        # Path to the unzipped folder
        self.scene_path = join(self.src_path, self.scene)

        if self._check_if_zipped(path):
            self._unzip(join(self.src_path, get_file(path)), join(self.src_path, self.scene), self.scene)

        self.bands_path = []
        for band in self.bands:
            self.bands_path.append(join(self.scene_path, self._get_full_filename(band)))
Code example #4
File: api.py Project: mboman/vxcage-mongodb
def get_malware(filehash):
    '''
    Retrieve sample object by hash
    @md5 : md5 hash
    @sha1 : sha1 hash
    @sha256 : sha256 hash
    @sha512 : sha512 hash
    @filehash : any of the above hash methods, the method will try to
                identify the hash algorithm.
    returns : binary sample or a JSON status message 
    '''

    if not filehash:
        md5 = request.forms.get('md5')
        sha1 = request.forms.get('sha1')
        sha256 = request.forms.get('sha256')
        sha512 = request.forms.get('sha512')

        if md5 is not None:
            filehash = md5
        if sha1 is not None:
            filehash = sha1
        if sha256 is not None:
            filehash = sha256
        if sha512 is not None:
            filehash = sha512

    # Anchored matches are required here: re.findall would find a
    # 32-character substring inside longer digests and misroute
    # SHA1/SHA256/SHA512 hashes into the MD5 branch.
    if re.match(r"[a-fA-F\d]{32}$", filehash):
        # MD5
        sampleData = get_file(db, md5=filehash)
    elif re.match(r"[a-fA-F\d]{40}$", filehash):
        # SHA1
        sampleData = get_file(db, sha1=filehash)
    elif re.match(r"[a-fA-F\d]{64}$", filehash):
        # SHA256
        sampleData = get_file(db, sha256=filehash)
    elif re.match(r"[a-fA-F\d]{128}$", filehash):
        # SHA512
        sampleData = get_file(db, sha512=filehash)
    else:
        # Hash not recognized
        response.content_type = 'application/json'
        return (jsonize({'error': 'unknown_hash'}), 400)

    if sampleData:
        response.content_type = 'application/octet-stream'
        return sampleData
    else:
        response.content_type = 'application/json'
        return (jsonize({'error': 'sample_not_found'}), 404)
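The endpoint above tells the hash algorithms apart purely by digest length (32/40/64/128 hex characters). A standalone sketch of that identification rule, as an illustration rather than the project's actual helper:

import re

# Hex-digest length -> algorithm name.
HASH_LENGTHS = {32: 'md5', 40: 'sha1', 64: 'sha256', 128: 'sha512'}

def identify_hash(filehash):
    # Return the algorithm name, or None for anything that is not a
    # plausible hex digest. Hypothetical helper, not from the project.
    if filehash and re.fullmatch(r'[a-fA-F0-9]+', filehash):
        return HASH_LENGTHS.get(len(filehash))
    return None

# MD5 of the empty string is 32 hex characters long.
assert identify_hash('d41d8cd98f00b204e9800998ecf8427e') == 'md5'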
Code example #5
File: image.py Project: GEO-IASS/landsat-util
    def __init__(self, path, bands=None, dst_path=None, verbose=False, force_unzip=False, bounds=None):

        self.projection = {'init': 'epsg:3857'}
        self.dst_crs = {'init': u'epsg:3857'}
        self.scene = get_file(path).split('.')[0]
        self.bands = bands if isinstance(bands, list) else [4, 3, 2]
        self.clipped = False

        # Landsat source path
        self.src_path = path.replace(get_file(path), '')

        # Build destination folder if doesn't exist
        self.dst_path = dst_path if dst_path else os.getcwd()
        self.dst_path = check_create_folder(join(self.dst_path, self.scene))
        self.verbose = verbose

        # Path to the unzipped folder
        self.scene_path = join(self.src_path, self.scene)

        # Unzip files
        if self._check_if_zipped(path):
            self._unzip(join(self.src_path, get_file(path)), join(self.src_path, self.scene), self.scene, force_unzip)

        if (bounds):
            self.bounds = bounds
            self.scene_path = self.clip()
            self.clipped = True

        self.bands_path = []
        for band in self.bands:
            self.bands_path.append(join(self.scene_path, self._get_full_filename(band)))
Code example #6
File: A2_db_index.py Project: achtman-lab/SPARSE
def mash_proc(data):
    idx, file_link, url_link, params = data
    if not os.path.isfile(file_link):
        file_link = '-'
    if file_link == '-':
        fname = url_link.rsplit('/', 1)[-1]
        try:
            utils.get_file(url_link, fname)
        except Exception:
            return idx, '', '', ''
    else:
        fname = file_link
    try:
        sha = utils.fileSHA(fname)
        if os.path.isfile(fname.rsplit('/', 1)[-1] + '.msh'):
            os.unlink(fname.rsplit('/', 1)[-1] + '.msh')
        msh_file = utils.get_mash(fname,
                                  fname.rsplit('/', 1)[-1],
                                  is_read=False,
                                  **params)
        if file_link == '-':
            os.unlink(fname)
        return idx, sha, msh_file, fname
    except Exception:
        return idx, '', '', ''
Code example #7
    def xmas_countdown(self):

        self.matrix.clear()

        debug.info("Counting down to Christmas!")
        #check for three-digit countdown
        if self.days_to_xmas < 99:
            x_pos = 7
        else:
            x_pos = 1

        #draw days to xmas
        self.matrix.draw_text((x_pos, 6),
                              str(self.days_to_xmas),
                              font=self.font.large,
                              fill=(0, 255, 0))

        #choose one of three daily images to draw based on days to xmas and draw it
        if self.days_to_xmas % 3 == 0:
            xmas_image = Image.open(get_file('assets/images/xmas_tree.png'))
        elif self.days_to_xmas % 3 == 2:
            xmas_image = Image.open(get_file('assets/images/candy_cane.png'))
        else:
            xmas_image = Image.open(get_file('assets/images/gbread.png'))

        self.matrix.draw_image((36, 1), xmas_image)

        #draw bottom text
        self.matrix.draw_text((1, 26),
                              "DAYS TO CHRISTMAS",
                              font=self.font,
                              fill=(255, 0, 0))

        self.matrix.render()
        self.sleepEvent.wait(15)
Code example #8
    def __get_config(self, base_filename, error=None):
        # Look and return config.json file

        filename = "{}.json".format(base_filename)

        (reference_config, error) = self.read_json(filename)
        if not reference_config:
            if (error):
                debug.error(error)
            else:
                debug.error(
                    "Invalid {} config file. Make sure {} exists in config/".
                    format(base_filename, base_filename))
            sys.exit(1)

        if base_filename == "config":
            # Validate against the config.json
            debug.info("Now validating config.json.....")
            conffile = "config/config.json"
            schemafile = "config/config.schema.json"

            confpath = get_file(conffile)
            schemapath = get_file(schemafile)
            (valid, msg) = validateConf(confpath, schemapath)
            if valid:
                debug.info("config.json passes validation")
            else:
                debug.error(
                    "config.json fails validation: error: [{0}]".format(msg))
                debug.error(
                    "Rerun the nhl_setup app to create a valid config.json")
                sys.exit(1)

        return reference_config
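validateConf above returns a (valid, msg) pair after checking config.json against a JSON Schema. One plausible implementation using the jsonschema package (an assumption; the project may implement validation differently):

import json
from jsonschema import validate, ValidationError

def validate_conf(confpath, schemapath):
    # Load both documents, validate, and report (valid, error message).
    with open(confpath) as cf, open(schemapath) as sf:
        conf, schema = json.load(cf), json.load(sf)
    try:
        validate(instance=conf, schema=schema)
        return True, None
    except ValidationError as err:
        return False, err.message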
Code example #9
File: fetch_spaceX.py Project: uzigang16/gram_bot
def fetch_spacex_last_launch():
    response = requests.get('{}launches/latest'.format(SPACEX_URL_TEMPLATE))
    response.raise_for_status()
    resp_body = response.json()
    images_urls = resp_body["links"]["flickr_images"]

    for num, url in enumerate(images_urls):
        utils.get_file(url, f"spacex{num}.jpg")
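Several examples in this collection assume a utils.get_file(url, filename) that downloads a URL to a local file. A minimal sketch of such a downloader built on requests (an assumed shape; each project's real helper may differ):

import requests

def get_file(url, filename):
    # Stream the response to disk so large files never sit fully in memory.
    response = requests.get(url, stream=True)
    response.raise_for_status()
    with open(filename, 'wb') as out:
        for chunk in response.iter_content(chunk_size=8192):
            out.write(chunk)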
Code example #10
    def reset():
        filename = "email_user_template.json" if os.path.isfile(
            get_file("email_user_template.json")) else "email_template.json"

        with open(get_file(filename), 'r', encoding='utf8') as fl:
            data = json.load(fl)

        _edit_email(window, data)
Code example #11
def main():
    # Load Active Worker Machines
    active_nodes, nodes = load_workers()
    start = time.time()
    running = True
    cycle = 0
    new_scans = 0
    try:
        while running:

            highest = 0
            tic = time.time()
            os.system('clear')
            header = '| HOST  |  MAX_HOPS \t|  TOTAL_TRACES |\n ' + '=' * 40
            print '\033[1m' + header + '\033[0m'
            # count local hops first
            os.system('cp hops.txt oghops.txt')
            local_count, n_local = process_hops()
            if local_count > highest:
                highest = local_count
            d = '| LOCAL | \t%d \t| \t%d \t|' % (local_count, n_local)
            print '\033[1m' + d + '\033[0m'
            counts = {}
            total = n_local
            # Check in with Each one and see what the current best result is
            for worker in active_nodes:
                h, i, p, m = nodes[worker]
                rp = '/home/%s/Documents/PoolParty/code/0.6/hops.txt' % h
                utils.get_file(rp, h, i, p, False)
                maxima, ntraced = process_hops()
                counts[worker] = [maxima, ntraced]
                rd = '| %s | \t%d \t| \t%d \t|' % (worker, maxima, ntraced)
                print '\033[1m' + rd + '\033[0m'
                total += ntraced
                if maxima > highest:
                    highest = maxima
            dt = time.time() - start
            # put local hops file back
            os.system('mv oghops.txt hops.txt')
            stats = ' MOST HOPS: \033[31m%d\033[0m\033[1m\t TOTAL RUN:\033[31m %d' % (
                highest, total)

            if cycle > 0:
                new_scans = total - new_scans
                ratio = str((new_scans) / (time.time() - tic))
            else:
                new_scans = total
                ratio = '?'
            cycle += 1

            print '\033[1m ' + '=' * 40 + '\033[0m'
            print '\033[1m| ' + stats + '\033[0m\033[1m |\033[0m'
            print '\033[1m| Time: %ss  [%s/s] |\033[0m' % (dt, ratio)
            print '\033[1m ' + '=' * 40 + '\033[0m'
            time.sleep(20)
    except KeyboardInterrupt:
        running = False
        pass
Code example #12
def fetch_hubble_image(*ids):
    for id in ids:
        url = '{}image/{}'.format(HUBBLE_URL_TEMPLATE, id)
        response = requests.get(url)
        response.raise_for_status()
        resp_body = response.json()
        images = resp_body['image_files']
        full_url = "https:{}".format(images[-1]['file_url'])
        utils.get_file(full_url, "hubble{}{}".format(id, os.path.splitext(full_url)[-1]))
Code example #13
    def __init__(self, coordinates):
        self.coord = coordinates

        # Load the fonts
        self.font_large = ImageFont.truetype(
            get_file("assets/fonts/score_large.otf"), 16)
        self.font = ImageFont.truetype(get_file("assets/fonts/04B_24__.TTF"),
                                       8)
        self.font_large_2 = ImageFont.truetype(
            get_file("assets/fonts/04B_24__.TTF"), 24)
Code example #14
 def __init__(self):
     # Load the fonts
     self.font_large = ImageFont.truetype(
         get_file("assets/fonts/score_large.otf"), 16)
     self.font_pb = ImageFont.truetype(
         get_file("assets/fonts/score_large.otf"), 22)
     self.font = ImageFont.truetype(get_file("assets/fonts/04B_24__.TTF"),
                                    8)
     self.font_large_2 = ImageFont.truetype(
         get_file("assets/fonts/04B_24__.TTF"), 24)
Code example #15
def downAGroup(urlPaths):
    try:
        pid = os.getpid()
        print("子进程 {0} 开始".format(pid))
        for item in urlPaths:
            print("[NEW {0}] {1}".format(pid, item["url"]))
            get_file(item["url"], item["path"])
        print("子进程结束")
    except Exception as e:
        msg = "子进程错误:%s" % e
        print(msg)
Code example #16
def _download_data(
    num_worker: int, cache_dir: str, base_url: str
):
  """
  Download the entire GLD v2 dataset, subset the dataset to only include the
  images in the federated GLD v2 dataset, and create both gld23k and gld160k
  datasets.
  Args:
    num_worker: The number of threads for downloading the GLD v2 dataset.
    cache_dir: The directory for caching temporary results.
    base_url: The base url for downloading GLD images.
  """
  logger = logging.getLogger(LOGGER)
  logger.info('Start to download fed gldv2 mapping files')

  path = get_file(
      '%s.zip' % FED_GLD_SPLIT_FILE_BUNDLE,
      origin=FED_GLD_SPLIT_FILE_DOWNLOAD_URL,
      extract=True,
      archive_format='zip',
      cache_dir=cache_dir)

  get_file(
      MINI_GLD_TRAIN_SPLIT_FILE,
      origin=MINI_GLD_TRAIN_DOWNLOAD_URL,
      cache_dir=cache_dir)
  get_file(
      MINI_GLD_TEST_SPLIT_FILE,
      origin=MINI_GLD_TEST_DOWNLOAD_URL,
      cache_dir=cache_dir)

  logger.info('Fed gldv2 mapping files are downloaded successfully.')
  base_path = os.path.dirname(path)
  train_path = os.path.join(base_path, FED_GLD_SPLIT_FILE_BUNDLE,
                            FED_GLD_TRAIN_SPLIT_FILE)
  test_path = os.path.join(base_path, FED_GLD_SPLIT_FILE_BUNDLE,
                           FED_GLD_TEST_SPLIT_FILE)
  train_mapping = _read_csv(train_path)
  test_mapping = _read_csv(test_path)
  all_images = set()
  all_images.update([row['image_id'] for row in train_mapping],
                    [row['image_id'] for row in test_mapping])
  image_dir = os.path.join(cache_dir, 'images')
  if not os.path.exists(image_dir):
    os.mkdir(image_dir)
  logger.info('Start to download GLDv2 dataset.')
  with multiprocessing.pool.ThreadPool(num_worker) as pool:
    train_args = [
        (i, all_images, image_dir, base_url) for i in range(NUM_SHARD_TRAIN)
    ]
    pool.starmap(_filter_images, train_args)

  logger.info('Finish downloading GLDv2 dataset.')
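The download step above fans work out over shards with multiprocessing.pool.ThreadPool.starmap. A self-contained toy showing the same fan-out pattern (the worker and its arguments are illustrative, not the dataset code):

import multiprocessing.pool

def fetch_shard(shard_id, base_url):
    # Stand-in worker; the real code downloads and filters images here.
    return '%s/shard-%03d' % (base_url, shard_id)

with multiprocessing.pool.ThreadPool(4) as pool:
    urls = pool.starmap(fetch_shard, [(i, 'https://example.com') for i in range(8)])
print(urls)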
Code example #17
def update(ctx, repository):
    ''' update repository(-ies)
    '''
    repo = getRepo(ctx, repository)
    get_file(repo.index_url, repo._index_path)
    threadNum = 20
    urlPathGroup = repo.getNeedUrlPathsNGroup(threadNum)
    po = Pool(threadNum)
    for i in range(0, threadNum):
        po.apply_async(downAGroup, args=(urlPathGroup[i], ))
    po.close()  # close the pool; it accepts no new tasks after this
    po.join()  # wait for all pool workers to finish; must come after close()
    print("Parent process finished")
Code example #18
 def lp_topo_get(self, topo, upstream=None):
     """
     search_order : get topologies from upstream if mentioned
                    get topologies from core package
     # need to add checks for ./topologies
     """
     if upstream is None:
         get_file(self.base_path + "/ex_topo/" + topo, "./topologies/")
     else:
         g = GitHub(upstream)
         files = g.list_files("ex_topo")
         link = filter(lambda link: link['name'] == topo, files)
         link = link[0]["download_url"]
         get_file(link, "./topologies", True)
         return link
Code example #19
File: api.py Project: greg-hellings/linch-pin
 def lp_topo_get(self, topo, upstream=None):
     """
     search_order : get topologies from upstream if mentioned
                    get topologies from core package
     # need to add checks for ./topologies
     """
     if upstream is None:
         get_file(self.base_path + "/ex_topo/" + topo, "./topologies/")
     else:
         g = GitHub(upstream)
         files = g.list_files("ex_topo")
         link = filter(lambda link: link['name'] == topo, files)
         link = link[0]["download_url"]
         get_file(link, "./topologies", True)
         return link
Code example #20
 def lp_layout_get(self, layout, upstream=None):
     """
     search_order : get layouts from upstream if mentioned
                    get layouts from core package
     """
     if upstream is None:
         get_file(self.base_path + "/inventory_layouts/" + layout,
                  "./layouts/")
     else:
         g = GitHub(upstream)
         files = g.list_files("inventory_layouts")
         link = filter(lambda link: link['name'] == layout, files)
         link = link[0]["download_url"]
         get_file(link, "./layouts", True)
         return link
Code example #21
File: api.py Project: greg-hellings/linch-pin
 def lp_layout_get(self, layout, upstream=None):
     """
     search_order : get layouts from upstream if mentioned
                    get layouts from core package
     """
     if upstream is None:
         get_file(self.base_path + "/inventory_layouts/" + layout,
                  "./layouts/")
     else:
         g = GitHub(upstream)
         files = g.list_files("inventory_layouts")
         link = filter(lambda link: link['name'] == layout, files)
         link = link[0]["download_url"]
         get_file(link, "./layouts", True)
         return link
Code example #22
File: run_tests.py Project: algby/raid
 def test_successful_request(self):
     url = 'http://www.nostravia.com'
     workers = 1
     requests_number = 1
     
     output = commands.getstatusoutput('python %s -u %s -w %s -r %s' % (get_file('raid.py'), url, workers, requests_number))
     self.assertIn('Request finalized with status 200', output[1])  
Code example #23
    def parse_mapper(self, _, rast_s3key):
        """
        Given a line containing a s3 keyname of a raster,
        download the mentioned file and split it into pixels
        in the format:
            point_wkt, {'val': <val>, 'date': <date>}
        (where the point_wkt is the centroid of the pixel)
        """
        job = os.environ.get('LT_JOB')

        rast_fn = utils.rast_dl(rast_s3key)

        mask_key = rast_s3key.replace(s.RAST_TRIGGER, s.MASK_TRIGGER)
        try:
            mask_fn = utils.rast_dl(mask_key)
        except Exception:
            mask_fn = None  # don't worry about mask

        # calculate index
        index_eqn = utils.get_settings(job)['index_eqn']
        index_rast = utils.rast_algebra(rast_fn, index_eqn)

        # figure out date from filename
        datestring = utils.filename2date(rast_fn)

        # pull down grid
        grid_fn = utils.get_file(s.OUT_GRID % job)

        print 'Serializing %s...' % os.path.basename(rast_fn)
        pix_generator = utils.apply_grid(index_rast,
                                         grid_fn, {'date': datestring},
                                         mask_fn=mask_fn)

        for point_wkt, pix_data in pix_generator:
            yield point_wkt, pix_data
Code example #24
File: run_tests.py Project: algby/raid
 def test_invalid_url_request(self):
     url = 'nothing'
     workers = 1
     requests_number = 1
     
     output = commands.getstatusoutput('python %s -u %s -w %s -r %s' % (get_file('raid.py'), url, workers, requests_number))
     self.assertIn('Invalid URL', output[1])
Code example #25
File: run_tests.py Project: algby/raid
 def test_connection_refused_request(self):
     url = 'http://127.0.0.1:8906'
     workers = 1
     requests_number = 1
     
     output = commands.getstatusoutput('python %s -u %s -w %s -r %s' % (get_file('raid.py'), url, workers, requests_number))
     self.assertIn('Connection refused', output[1])
Code example #26
    def __init__(self, canvas, home_team, away_team):
        self.canvas = canvas
        self.home_team = home_team
        self.away_team = away_team

        self.colors = json.load(open(get_file('Assets/colors.json')))
        self.font = get_font()
Code example #27
    def render(self):
        self.matrix.clear()
        # bg_away = self.team_colors.color("{}.primary".format(self.scoreboard.away_team.id))
        # bg_home = self.team_colors.color("{}.primary".format(self.scoreboard.home_team.id))
        # self.matrix.draw_rectangle((0,0), (64,64), (bg_away['r'],bg_away['g'],bg_away['b']))
        # self.matrix.draw_rectangle((64,0), (128,64), (bg_home['r'],bg_home['g'],bg_home['b']))
        self.matrix.draw_rectangle((0,0), (32,32), (0,0,0))
        self.away_logo_renderer.render()
        self.matrix.draw_rectangle((32,0), (64,32), (0,0,0))
        self.home_logo_renderer.render()
        
        #self.matrix.draw.polygon([(37,0), (91,0), (80,64), (48,64)], fill=(0,0,0))
        #Work in progress. testing gradients
        gradient = Image.open(get_file('assets/images/64x32_scoreboard_center_gradient.png'))
        self.matrix.draw_image((32,0), gradient, align="center")
        
        if self.status.is_scheduled(self.scoreboard.status):
            self.draw_scheduled()

        if self.status.is_live(self.scoreboard.status):
            self.draw_live()

        if self.status.is_game_over(self.scoreboard.status):
            self.draw_final()

        if self.status.is_final(self.scoreboard.status):
            self.draw_final()

        if self.status.is_irregular(self.scoreboard.status):
            '''TODO: Need to figure out the irregular status'''
            self.draw_irregular()
Code example #28
def get_all_multiword_postings(query_tokens):
    results = []

    filenames = get_page_filenames()
    for filename in tqdm.tqdm(filenames):
        file = get_file(filename, "utf8")
        text = pp.get_text(file)
        words = pp.preprocess(text)

        postings = []
        frequency_sum = 0

        for token in query_tokens:
            posting = Posting(token, filename, 0, [])
            for word in words:
                if word[0] == token:
                    posting.frequency += 1
                    posting.indexes.append(word[1])

            if posting.frequency > 0:
                postings.append(posting)
                frequency_sum += posting.frequency

        if len(query_tokens) == len(postings):
            document_name = filename[9:].replace("\\", "/")
            indexes = []
            for p in postings:
                indexes.append(sorted(p.indexes))

            result = Result(document_name, frequency_sum, indexes)
            results.append(result)

    return sorted(results, key=lambda r: r.frequency_sum, reverse=True)
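The search code above builds Posting(token, filename, 0, []) and Result(document_name, frequency_sum, indexes) objects that are defined elsewhere. Sketches of the shapes it appears to rely on (assumed, not the project's definitions):

from dataclasses import dataclass, field

@dataclass
class Posting:
    token: str          # query token this posting belongs to
    filename: str       # document the token was found in
    frequency: int      # occurrences of the token in the document
    indexes: list = field(default_factory=list)  # word positions

@dataclass
class Result:
    document_name: str
    frequency_sum: int
    indexes: list       # one sorted position list per query token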
Code example #29
File: teams.py Project: flemingjw/mlb-led-scoreboard
  def __init__(self, canvas, home_team, away_team):
    self.canvas = canvas
    self.home_team = home_team
    self.away_team = away_team

    self.colors = json.load(open(get_file('Assets/colors.json')))
    self.font = get_font()
Code example #30
File: day14.py Project: yufengg/adventofcode
def day14p1():
    print('day 14 part 1')
    lines = ut.get_file('day14_input.txt', parse1)
    # lines = ut.get_file('day14_input_small.txt', parse1)

    current_mask = ''
    and_mask = ''
    or_mask = ''
    mem = {}
    count = 0
    for add, num in lines:
        if add == 'mask':
            or_mask, and_mask = get_and_or(num)
            # print(or_str, and_str)
        else:
            reg = int(add[4:-1])
            converted = (int(num) | or_mask) & and_mask
            mem[reg] = converted

        # if count == 4:
        #     pass
        #     # break
        # count+=1

        # print(mem)
    return sum(mem.values())  # 28min
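get_and_or above turns a mask string such as 'X1XX0' into a pair of bitmasks: the OR mask forces the '1' bits on and the AND mask forces the '0' bits off, so (value | or_mask) & and_mask applies the whole mask in one step. A sketch of that helper under those assumptions:

def get_and_or(mask):
    # '1' positions become 1 in the OR mask; every position except '0'
    # becomes 1 in the AND mask, so only '0' positions get cleared.
    or_mask = int(mask.replace('X', '0'), 2)
    and_mask = int(mask.replace('X', '1'), 2)
    return or_mask, and_mask

assert get_and_or('X1XX0') == (0b01000, 0b11110)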
Code example #31
File: split_data.py Project: msalvi96/CCAssist
def main_df_split():
    """ Main function to split data files """

    clear()
    pretty_print("You can split the file in equal parts here:", "#")

    try:
        name = get_file()
        pretty_print("How many chunks do you need?", "-")
        number = get_int_input()
        data_frame = pd.read_excel(name)
        split_df = np.array_split(data_frame, number)
        for index, dataframe in enumerate(split_df, 1):
            file_name = get_file_name()
            dataframe.to_excel(f"{file_name}.xlsx", index=False)
            pretty_print(f"File {index} {file_name}.xlsx Saved", "*")

        pretty_print("Have a Nice Day! - @MrunalSalvi", "&")
        sleep(5)

    except FileNotFoundError:
        clear()
        pretty_print("The File Does not Exist.", ":")
        pretty_print("Make Sure your place the file in the working directory.",
                     ":")
        sleep(2)
        main_df_split()

    except Exception as log_error:
        print("Oops something went wrong...")
        print(log_error)
        sleep(10)
Code example #32
File: day13.py Project: yufengg/adventofcode
def day13p2v2():
    print('day 13 part 2v2')
    lines = ut.get_file('day13_input.txt', parse2)
    # lines = ut.get_file('day13_input_small.txt', parse2)

    offset = 0
    schedule = []
    for val in lines[1].split(','):
        if val.isnumeric():
            bus_id = int(val)
            schedule.append((bus_id, offset))
        offset += 1
    print(schedule)

    all_buses = [bus_id for bus_id, offset in schedule]

    # find a match with the first n buses
    # use that match to update the search_offset (=0)
    # update the range operators
    range_top = 1
    range_step = 1
    # prev_bus_id = 1
    search_offset = 0
    for i, (bus_id, offset) in enumerate(schedule):
        range_top *= bus_id
        found = check_match(range_top, range_step, search_offset,
                            schedule[:i + 1])
        if found:
            search_offset = found
        print('found', search_offset)
        range_step *= bus_id

    return search_offset
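check_match above is the workhorse of the sieve: with the step fixed to the product of the buses already satisfied, it scans candidate timestamps until one meets (t + offset) % bus_id == 0 for every bus in the prefix of the schedule. A sketch consistent with that usage (an assumed helper, not the author's exact code):

def check_match(range_top, range_step, search_offset, schedule):
    # The step size preserves every constraint matched so far, so only
    # the newly added bus needs to be satisfied by the scan.
    for t in range(search_offset, range_top + 1, range_step):
        if all((t + offset) % bus_id == 0 for bus_id, offset in schedule):
            return t
    return None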
Code example #33
    def initialize(self):

        try:
            if not exists('settings.json'):
                # create settings file if it isn't found
                with open('settings.json', 'w') as sf:
                    sf.write('{}')

        except Exception as e:
            self.log.debug('Cannot access settings.json: {}'.format(repr(e)))
        announce_rx = 'announce (?P<Words>.*)'
        self.add_ability(announce_rx, self.handle_announce_intent)
        self.add_ability('brain scan', self.handle_scan_intent)
        self.add_ability('reload abilities', self.reload_abilities)
        grep_log_rx = 'search skill log for (?P<Search>.*)'  #( and )?(?P<Before>\d*)( before )?( and )?(?P<After>\d*)( after)?'
        self.add_ability(grep_log_rx, self.handle_grep_log_intent)
        self.load_abilities()
        if 'thot_chains' not in self.settings:
            self.settings['thot_chains'] = {}
        self.load_chains()
        self.emitter.on('recognizer_loop:audio_output_end',
                        self.ready_to_continue)
        alert_msg = ' My path in brain skill services is wrong. There may be malware present.'

        try:
            mcbss_path = abilities.mycroftbss.set_brain_path(self)
            bs_path = dirname(utils.get_file(self))
            if mcbss_path and not mcbss_path == bs_path:
                self.alert(alert_msg, '{} vs {}'.format(mcbss_path, bs_path))

        except Exception:
            pass
Code example #34
    def parse_mapper(self, _, rast_s3key):
        """
        Given a line containing an S3 keyname of a raster,
        download the mentioned file and split it into pixels
        in the format:
            point_wkt, {'val': <val>, 'date': <date>}
        (where the point_wkt is the centroid of the pixel)
        """
        job = os.environ.get('LT_JOB')

        rast_fn = utils.rast_dl(rast_s3key)

        mask_key = rast_s3key.replace(s.RAST_TRIGGER, s.MASK_TRIGGER)
        try:
            mask_fn = utils.rast_dl(mask_key)
        except Exception:
            mask_fn = None  # don't worry about mask

        # calculate index
        index_eqn = utils.get_settings(job)['index_eqn']
        index_rast = utils.rast_algebra(rast_fn, index_eqn)

        # figure out date from filename
        datestring = utils.filename2date(rast_fn)

        # pull down grid
        grid_fn = utils.get_file(s.OUT_GRID % job)

        print 'Serializing %s...' % os.path.basename(rast_fn)
        pix_generator = utils.apply_grid(
            index_rast, grid_fn, {'date': datestring}, mask_fn=mask_fn)

        for point_wkt, pix_data in pix_generator:
            yield point_wkt, pix_data
Code example #35
 def make(self):
     files = os.listdir(self.path)
     reader = ShowReader()
     workbook, sheet = utils.make_workbook()
     utils.set_columns_width(sheet,
                             [6, 25, 22, 22, 8, 8, 8, 25, 6, 8, 8, 8, 8])
     sheet.append([
         'ID', '名称', '开始时间', '结束时间', '省级', '地级', '县级', '场馆', '星级', '嘉宾数',
         '去过数', '现场票价', '电子票价'
     ])
     for file in files:
         path = '{}/{}'.format(self.path, file)
         try:
             reader.load(utils.get_json_file(path))
         except UnicodeDecodeError:
             reader.load(utils.get_file(path, str))
         location = reader.location()
         location += [''] * (4 - len(location))
         sheet.append([
             int(re.search(r'\d+', file).group(0)),
             reader.name(),
             reader.start_time(),
             reader.end_time(), location[0], location[1], location[2],
             location[3],
             reader.star(),
             reader.guest_count(),
             reader.went(),
             reader.market_price(),
             reader.online_price()
         ])
     utils.save_workbook(workbook, 'Nyato.xlsx')
Code example #36
    def _draw_goal(self, id, name):
        debug.info('Score by team: ' + name)
        # Set opposing team goal animation here
        filename = "assets/animations/goal_light_animation.gif"
        if id in self.data.pref_teams:
            # Set your preferred team goal animation here
            filename = "assets/animations/goal_light_animation.gif"

        im = Image.open(get_file(filename))

        # Set the frame index to 0
        frame_num = 0

        self.matrix.clear()

        # Go through the frames
        x = 0
        while x != 5:
            try:
                im.seek(frame_num)
            except EOFError:
                x += 1
                frame_num = 0
                im.seek(frame_num)

            self.matrix.draw_image((0, 0), im)
            self.matrix.render()

            frame_num += 1
            sleep(0.1)
Code example #37
File: image.py Project: julesair/LandsatView
    def _check_if_zipped(self, path):
        """ Checks if the filename shows a tar/zip file """
        filename = get_file(path).split(".")

        if filename[-1] in ["bz", "bz2"]:
            return True

        return False
Code example #38
File: image.py Project: dongjwOU/landsat-util
    def _check_if_zipped(self, path):
        """ Checks if the filename shows a tar/zip file """
        filename = get_file(path).split('.')

        if filename[-1] in ['bz', 'bz2']:
            return True

        return False
Code example #39
  def __render_dumpster_fire(self):
    image_file = get_file("Assets/fire.jpg")
    image = Image.open(image_file)
    image_rgb = image.convert("RGB")
    image_x = (self.canvas.width / 2) - 16

    self.matrix.Clear()
    while True:
      self.matrix.SetImage(image_rgb, image_x, 0)
      time.sleep(20.0)
Code example #40
File: offday.py Project: ccrabb/mlb-led-scoreboard
  def __init__(self, canvas, data, scrolling_text_pos):
    self.canvas = canvas
    self.data = data
    self.layout = data.config.layout
    self.colors = data.config.scoreboard_colors
    self.bgcolor = self.colors.graphics_color("default.background")
    self.scrolling_text_pos = scrolling_text_pos

    self.weather_icon = None
    if self.data.weather.available():
      image_file = get_file(self.data.weather.icon_filename())
      self.weather_icon = Image.open(image_file)
Code example #41
File: layout.py Project: ccrabb/mlb-led-scoreboard
  def __load_font(self, font_name):
    if font_name in self.font_cache:
      return self.font_cache[font_name]

    font_paths = ["Assets", "matrix/fonts"]
    for font_path in font_paths:
      path = get_file("{}/{}.bdf".format(font_path, font_name))
      if os.path.isfile(path):
        font = graphics.Font()
        font.LoadFont(path)
        self.font_cache[font_name] = font
        return font
Code example #42
File: image.py Project: julesair/LandsatView
    def __init__(self, path, bands=None, dst_path=None, verbose=False, force_unzip=False):

        self.projection = {"init": "epsg:3857"}
        self.dst_crs = {"init": u"epsg:3857"}
        self.scene = get_file(path).split(".")[0]
        self.bands = bands if isinstance(bands, list) else [4, 3, 2]

        # Landsat source path
        self.src_path = path.replace(get_file(path), "")

        # Build destination folder if it doesn't exist
        self.dst_path = dst_path if dst_path else settings.PROCESSED_IMAGE
        self.dst_path = check_create_folder(join(self.dst_path, self.scene))
        self.verbose = verbose

        # Path to the unzipped folder
        self.scene_path = join(self.src_path, self.scene)

        if self._check_if_zipped(path):
            self._unzip(join(self.src_path, get_file(path)), join(self.src_path, self.scene), self.scene, force_unzip)

        self.bands_path = []
        for band in self.bands:
            self.bands_path.append(join(self.scene_path, self._get_full_filename(band)))
Code example #43
File: raid.py Project: algby/raid
    def create_workers(self, url, concurrency, requests_number):
        """
        Create workers and, therefore, concurrency. Exit when all the
        workers are done.
        
        :param url: Requests' target.
        :param concurrency: Number of workers in parallel. Define Concurrency.
        :param requests_number: Number of requests per worker. Define
                                sequentiality.
        """
        worker = 1
        workers = []
        while worker <= concurrency:
            workers.append(subprocess.Popen('python %s %s %s %s' %
                           (utils.get_file('worker.py'), worker, url, requests_number),
                            shell=True))
            worker += 1

        while True:
            # poll() returns None while a worker is still running and its
            # exit code once finished; a clean exit code of 0 is falsy, so
            # the completion check must test for "is not None" explicitly.
            if all(x.poll() is not None for x in workers):
                print "[Controller] Switching off workers..."
                sys.exit(1)
Code example #44
File: raid.py Project: algby/raid
        :param url: Requests' target.
        """
        parts = urlparse.urlparse(url)
        return parts.scheme in ('http', 'https')


if __name__ == '__main__':
    parser = OptionParser()
    parser.add_option("-u", "--url", action="store", type="string",
                      default="http://127.0.0.1:8000",
                      help="Requests' target", dest="url")
    parser.add_option("-w", "--workers", action="store", type="int",
                      default=1, help="Number of workers in parallel", dest="workers")
    parser.add_option("-r", "--requests", action="store", type="int",
                      default=1, help="Number of requests per worker",
                      dest="requests_number")
    parser.add_option("-t", "--tests", action="store", type="string",
                      default=1, help="Trigger Test Suite",
                      dest="test_mode")

    (options, args) = parser.parse_args()
    if len(sys.argv) > 1:
        if sys.argv[1] == 'tests':
            print "Running tests..."
            os.system('python %s' %
                     (utils.get_file('run_tests.py')))
            sys.exit()

    raid = Raid()
    raid(options.url, options.workers, options.requests_number)
Code example #45
File: job-pe.py Project: acochenour/vxcage-jobs
signatures = peutils.SignatureDatabase('userdb.txt')

while True:
    for (sampleno, sample) in \
        enumerate(db.fs.files.find({'$and': [{'pe': {'$exists': False}},
                  {'filetype': {'$regex': 'PE32.*'}}]}, timeout=False)):
        try:
            logger.info('[%s] Processing sample %s' % (sampleno,
                        sample['sha256']))
            sample_key = {'_id': sample['_id']}
            job_key = {'md5': sample['md5']}

            # download sample file

            logger.debug('[%s] Downloading data' % sampleno)
            pe = pefile.PE(data=get_file(db, sha256=sample['sha256']))

            # Do analysis

            logger.debug('[%s] Analysing PE headers' % sampleno)
            peheader = clean_data(pe.dump_dict())
            logger.debug('[%s] Analysing PE signatures' % sampleno)
            peid = signatures.match_all(pe, ep_only=True)

            # Store results

            logger.debug('[%s] Storing PEDump results into MongoDB' % sampleno)

            db.fs.files.update(sample_key, {'$set': {'pe': peheader}},
                               upsert=True)
Code example #46
File: assembly64.py Project: freabemania/assembly
def install_item(self, current_item, dispose_dia = True):
    post_ga_event('install_item','artifact_%s' % current_item.name)
    folder = '%s/%s/' % (dir_name, current_item.folder)
    if not utils.check_if_already_updated_with_delta(dir_name, current_item.name, current_item.version) or not os.path.exists(folder):
        try:
            log_dia_info('Getting full entry %s' % current_item.name)
            retries = 0
            nof_retries = 3
            while retries < nof_retries:
                try:
                    utils.get_file(current_item.basedir,utils.get_storage_location(current_item.file),current_item.file,current_item.name)

                    if os.path.exists(folder):
                        log_dia_info('Deleting current folder %s (this may take a while, please be patient)' % folder)
                        utils.delete_folder('%s/%s/' % (dir_name, current_item.folder))

                    if was_dia_cancelled():
                        log_dia_info('Downloaded for %s was cancelled' % current_item.name)
                        if dispose_dia:
                            wx.CallAfter(dispose_dialog)
                        return

                    log_dia_info('Downloaded %s' % current_item.name)
                    log_dia_info('Extracting files %s' %dir_name)

                    dia_tick()
                    utils.extract_file(utils.get_storage_location(current_item.file), dir_name,current_item,True)
                    dia_tick()
                    break

                except InvalidZipFileException:
                    post_ga_event('install_item_invalid_zip','artifact_%s' % current_item.name)
                    log_info('Invalid zipfile, delete and retry')
                    utils.delete_file(utils.get_storage_location(current_item.file))
                    if retries == nof_retries-1:
                        raise

                retries+=1

            if utils.get_boolean_user_setting(delete_files_after_install):
                utils.delete_file(utils.get_storage_location(current_item.file))

            if was_dia_cancelled():
                if dispose_dia:
                    wx.CallAfter(dispose_dialog)
                return

            log_dia_info('Update db')
            utils.update_db(dir_name, current_item.name, current_item.version)
            current_item.not_installed = False

            log_dia_info('Done extracting full entry %s at location %s' % (current_item.name, dir_name))
            log_dia_info('Install done')

            if dispose_dia:
                wx.CallAfter(dispose_dialog)

        except InvalidZipFileException as e:
            utils.delete_folder('%s/%s/' % (dir_name, current_item.folder))
            log_dia_info('Install failed due to an error during fetch or unzip')
            if dispose_dia:
                wx.CallAfter(dispose_dialog_fail,e.message)

        except FtpOverloadedException as e:
            log_dia_info('Too many users, please try again in a while')
            if dispose_dia:
                wx.CallAfter(dispose_dialog_fail,'Too many users, please try later')

        except:
            utils.delete_folder('%s/%s/' % (dir_name, current_item.folder))
            log_dia_info('Install failed due to unknown error')
            if dispose_dia:
                wx.CallAfter(dispose_dialog_fail,'Unknown error %s:' % sys.exc_info()[0])
        finally:
            update_tree_view(self)
    else:
        if dispose_dia:
            wx.CallAfter(dispose_dialog)
Code example #47
File: assembly64.py Project: freabemania/assembly
def update_item(self, item,dispose_dia = True):
    post_ga_event('update_item','artifact_%s' % item.name)
    try:
        if item.create_delta:
            for deltaitem in item.deltas:
                if not utils.check_if_already_updated_with_delta(dir_name, item.name, deltaitem.version):
                    log_dia_info('Updating file %s' % deltaitem.file)
                    retries = 0
                    nof_retries = 3
                    while retries < nof_retries:
                        utils.get_file(utils.resolve_delta_dir(item),utils.get_storage_location(deltaitem.file),deltaitem.file,item.name)
                        dia_tick()

                        if was_dia_cancelled():
                            log_dia_info('Cancelling...')
                            break

                        if item.dynamic_import:
                            utils.delete_folder(utils.user_file_cache_dyn)
                            utils.create_dir(utils.user_file_cache_dyn)
                            try:
                                if was_dia_cancelled():
                                    log_dia_info('Cancelling...')
                                    break

                                utils.extract_file(utils.get_storage_location(deltaitem.file), utils.user_file_cache_dyn,item,True)

                                if was_dia_cancelled():
                                    log_dia_info('Cancelling...')
                                    break

                                dynamic_import.move_in_place(utils.user_file_cache + 'dyn/%s/' % item.folder, '%s/%s/' % (dir_name, item.folder))

                                if was_dia_cancelled():
                                    log_dia_info('Cancelling...')
                                    update_tree_view(self)
                                    break

                                utils.update_db(dir_name, item.name, deltaitem.version)
                                utils.delete_folder(utils.user_file_cache + 'dyn/%s/' % item.folder)
                                item.needs_update = False
                                update_tree_view(self)
                                dia_tick()
                                break

                            except FtpOverloadedException:
                                post_ga_event('update_item_ftp_overload','artifact_%s' % deltaitem.file)
                                raise

                            except InvalidZipFileException as e:
                                post_ga_event('update_item_invalid_zip','artifact_%s' % deltaitem.file)
                                utils.delete_file(utils.get_storage_location(deltaitem.file))
                                if retries == nof_retries-1:
                                    raise

                        else:
                            log_info('Unpacking %s into %s' % (item.name, dir_name))
                            try:
                                if was_dia_cancelled():
                                    log_dia_info('Cancelling...')
                                    break
                                utils.extract_file(utils.get_storage_location(deltaitem.file), dir_name,item,False)
                                if was_dia_cancelled():
                                    log_dia_info('Cancelling...')
                                    update_tree_view(self)
                                    break
                                utils.update_db(dir_name, item.name, deltaitem.version)
                                target_folder = dir_name + '/' + item.folder
                                log_dia_info('Updated %s with deltafile %s at location %s' % (item.name,deltaitem.file,target_folder))
                                item.needs_update = False
                                update_tree_view(self)
                                dia_tick()
                                if utils.get_boolean_user_setting(delete_files_after_install):
                                    utils.delete_file(utils.get_storage_location(deltaitem.file))
                                break

                            except FtpOverloadedException:
                                post_ga_event('update_item_ftp_overload','artifact_%s' % deltaitem.file)
                                raise

                            except InvalidZipFileException:
                                post_ga_event('update_item_invalid_zip','artifact_%s' % deltaitem.file)
                                log_dia_info('Invalid delta zipfile, delete and retry')
                                utils.delete_file(utils.get_storage_location(deltaitem.file))
                                if retries == nof_retries-1:
                                    raise

                        retries += 1
        if dispose_dia:
            wx.CallAfter(dispose_dialog)

    except FtpOverloadedException:
        if dispose_dia:
            wx.CallAfter(dispose_dialog_fail,'Too many users right now, please try again later')

    except InvalidZipFileException as e:
        if dispose_dia:
            wx.CallAfter(dispose_dialog_fail,e.message)
    except:
        if dispose_dia:
            wx.CallAfter(dispose_dialog_fail,'Unknown error %s:' % sys.exc_info()[0])
    finally:
        update_tree_view(self)
Code example #48
File: job-pdf.py Project: acochenour/vxcage-jobs

while True:
    for (sampleno, sample) in \
        enumerate(db.fs.files.find({'$and': [{'pdfid': {'$exists': False}},
                  {'filetype': {'$regex': 'PDF.*'}}]}, timeout=False)):
        try:
            logger.info('[%s] Processing sample %s' % (sampleno,
                        sample['sha256']))
            sample_key = {'_id': sample['_id']}
            job_key = {'md5': sample['md5']}

            # download sample file

            logger.debug('[%s] Downloading data' % sampleno)
            data = get_file(db, sha256=sample['sha256'])

            # Do analysis

            logger.debug('[%s] Analysing PDF' % sampleno)
            pdfid = clean_data(get_pdfid(data))

            # Store results

            if pdfid:
                logger.debug('[%s] Storing results into MongoDB'
                             % sampleno)
                db.fs.files.update(sample_key,
                                   {'$set': {'pdfid': pdfid}},
                                   upsert=True)
            logger.info('[%s] Metadata updated' % sampleno)
Code example #49
File: job-exif.py Project: acochenour/vxcage-jobs
while True:
    for (sampleno, sample) in \
        enumerate(db.fs.files.find({'exif': {'$exists': False}},
                  timeout=False)):
        try:
            logger.info('[%s] Processing sample %s' % (sampleno,
                        sample['sha256']))
            sample_key = {'_id': sample['_id']}
            job_key = {'md5': sample['md5']}

            # download sample file

            with exiftool.ExifTool() as et:
                logger.debug('[%s] Downloading data' % sampleno)
                filename = os.path.join('/', 'tmp', sample['sha256'])
                get_file(db, filename=filename, sha256=sample['sha256'])

                logger.debug('[%s] Analysing' % sampleno)
                metadata = et.get_metadata(filename)

                logger.debug('[%s] Deleting temporary file' % sampleno)
                os.remove(filename)

                logger.debug('[%s] Storing results into MongoDB'
                             % sampleno)

                for exifkey in uselessexifkey:
                    del metadata[exifkey]

                metadata = clean_data(metadata)
Code example #50
        res.append(xor(aes_ecb_decode(block, password), blocks[i - 1]))
    return b''.join(res)


def aes_cbc_encode(plaintext: bytes, password: bytes, iv: bytes) -> bytes:
    blocks = split_into_groups(plaintext, 16)
    res = []
    prev_block = iv
    for block in blocks:
        prev_block = aes_ecb_encode(xor(prev_block, block), password)
        res.append(prev_block)
    return b''.join(res)


print('Set 2')
print('Challenge 9')
res9 = pad_with_pkcs7(b'YELLOW SUBMARINE', 20)
assert res9 == b'YELLOW SUBMARINE\x04\x04\x04\x04'
print(res9)

print('Challenge 10')
ciphertext10 = base64_to_bytes(get_file('10.txt'))
password10 = b'YELLOW SUBMARINE'
iv = b'\x00' * 16
res10 = aes_cbc_decode(ciphertext10, password10, iv).decode('ascii')
assert res10.startswith("I'm back and I'm ringin' the bell")
print(res10)
# Check that encrypting is the opposite of decrypting
test_ciphertext10 = aes_cbc_encode(res10.encode('ascii'), password10, iv)
assert test_ciphertext10 == ciphertext10
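The CBC routines above follow the textbook construction: each plaintext block is XORed with the previous ciphertext block (the IV for the first block) before ECB encryption, and the reverse on decryption. Sketches of the ECB and XOR primitives they build on, here via pycryptodome (an assumption; the challenge code defines its own versions):

from Crypto.Cipher import AES

def aes_ecb_encode(block, password):
    return AES.new(password, AES.MODE_ECB).encrypt(block)

def aes_ecb_decode(block, password):
    return AES.new(password, AES.MODE_ECB).decrypt(block)

def xor(a, b):
    # Byte-wise XOR of two equal-length byte strings.
    return bytes(x ^ y for x, y in zip(a, b))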
Code example #51
File: landsat.py Project: rcdosado/landsat-util
def main(args):
    """
    Main function - launches the program.

    :param args:
        The Parser arguments
    :type args:
        Parser object

    :returns:
        List

    :example:
        >>> ["The latitude and longitude values must be valid numbers", 1]
    """

    v = VerbosityMixin()

    if args:

        if args.subs == 'process':
            verbose = True if args.verbose else False
            force_unzip = True if args.force_unzip else False
            stored = process_image(args.path, args.bands, verbose, args.pansharpen, args.ndvi, force_unzip, args.ndvi1)

            if args.upload:
                u = Uploader(args.key, args.secret, args.region)
                u.run(args.bucket, get_file(stored), stored)

            return ["The output is stored at %s" % stored]

        elif args.subs == 'search':

            try:
                if args.start:
                    args.start = reformat_date(parse(args.start))
                if args.end:
                    args.end = reformat_date(parse(args.end))
            except (TypeError, ValueError):
                return ["You date format is incorrect. Please try again!", 1]

            s = Search()

            try:
                lat = float(args.lat) if args.lat else None
                lon = float(args.lon) if args.lon else None
            except ValueError:
                return ["The latitude and longitude values must be valid numbers", 1]

            result = s.search(paths_rows=args.pathrow,
                              lat=lat,
                              lon=lon,
                              limit=args.limit,
                              start_date=args.start,
                              end_date=args.end,
                              cloud_max=args.cloud)

            if result['status'] == 'SUCCESS':
                v.output('%s items were found' % result['total'], normal=True, arrow=True)
                if result['total'] > 100:
                    return ['Over 100 results. Please narrow your search', 1]
                else:
                    v.output(json.dumps(result, sort_keys=True, indent=4), normal=True, color='green')
                    return ['Search completed!']
            elif result['status'] == 'error':
                return [result['message'], 1]
        elif args.subs == 'download':
            d = Downloader(download_dir=args.dest)
            try:
                bands = convert_to_integer_list(args.bands)
                if args.pansharpen:
                    bands.append(8)
                if args.ndvi:
                    bands = [4, 5]

                downloaded = d.download(args.scenes, bands)

                if args.process:
                    force_unzip = True if args.force_unzip else False
                    for scene, src in downloaded.iteritems():
                        if args.dest:
                            path = join(args.dest, scene)
                        else:
                            path = join(settings.DOWNLOAD_DIR, scene)

                        # Keep using Google if the image is before 2015
                        if src == 'google':
                            path = path + '.tar.bz'

                        stored = process_image(path, args.bands, False, args.pansharpen, args.ndvi, force_unzip)

                        if args.upload:
                            try:
                                u = Uploader(args.key, args.secret, args.region)
                            except NoAuthHandlerFound:
                                return ["Could not authenticate with AWS", 1]
                            except URLError:
                                return ["Connection timeout. Probably the region parameter is incorrect", 1]
                            u.run(args.bucket, get_file(stored), stored)

                        v.output("The output is stored at %s" % stored, normal=True, arrow=True)

                    return ['Image Processing Completed', 0]
                else:
                    return ['Download Completed', 0]
            except IncorrectSceneId:
                return ['The SceneID provided was incorrect', 1]
Code example #52
 def read_json(self, filename):
   j = {}
   path = get_file(filename)
   if os.path.isfile(path):
     j = json.load(open(path))
   return j
Code example #53
File: job-hashes.py Project: acochenour/vxcage-jobs
db = client.vxcage
fs = gridfs.GridFS(db)

while True:
    try:
        for (sampleno, sample) in \
            enumerate(db.fs.files.find({'sha1': {'$exists': False}},
                      timeout=False)):
            try:
                logger.info('[%s] Processing sample %s' % (sampleno,
                            sample['md5']))
                key = {'md5': sample['md5']}

                metadata = {}
                logger.debug('[%s] Downloading data' % sampleno)
                data = get_file(db, md5=sample['md5'])

                # Do analysis

                logger.debug('[%s] Analysing' % sampleno)

                # metadata['md5'] = hashlib.md5(data).hexdigest()

                metadata['sha1'] = hashlib.sha1(data).hexdigest()
                metadata['sha256'] = hashlib.sha256(data).hexdigest()
                metadata['sha512'] = hashlib.sha512(data).hexdigest()
                metadata['ssdeep'] = pydeep.hash_buf(data)

                # Store results

                logger.debug('[%s] Storing results into MongoDB'
Code example #54
def main():
    res1 = hex_to_base64(
        '49276d206b696c6c696e6720796f757220627261696e206c6'
        '96b65206120706f69736f6e6f7573206d757368726f6f6d')
    print('Task 1')
    print(res1)
    assert res1 == (b'SSdtIGtpbGxpbmcgeW91ciBicmFpbiBsaWtlIGEgcG9pc'
                    b'29ub3VzIG11c2hyb29t')

    print('Task 2')
    x = hex_to_bytes('1c0111001f010100061a024b53535009181c')
    y = hex_to_bytes('686974207468652062756c6c277320657965')
    res2 = bytes_to_hex(xor(x, y))
    print(res2)
    assert res2 == '746865206b696420646f6e277420706c6179'

    print('Task 3')
    ciphertext = hex_to_bytes('1b37373331363f78151b7f2b783431333d78397828372d'
                              '363c78373e783a393b3736')
    res3 = decode_1_byte_xor(ciphertext)
    print(res3[1])
    assert res3[1] == "Cooking MC's like a pound of bacon"

    print('Task 4')
    ciphertexts = get_file('4.txt').split('\n')
    res4 = find_and_decrypt_ciphertexts(ciphertexts)
    print('Key: {0}\nPlaintext: {1}'.format(*res4))
    assert res4[1] == 'Now that the party is jumping\n'

    print('Task 5')
    plaintext5 = ("Burning 'em, if you ain't quick and nimble\n"
                  "I go crazy when I hear a cymbal")
    key = "ICE"
    correct_answer = ("0b3637272a2b2e63622c2e69692a23693a2a3c6324202d623d63343"
                      "c2a26226324272765272a282b2f20430a652e2c652a3124333a653e"
                      "2b2027630c692b20283165286326302e27282f")
    res5 = bytes_to_hex(repeating_key_xor(text_to_bytes(plaintext5),
                                          text_to_bytes(key)))
    print(res5)
    assert res5 == correct_answer

    print('Task 6')
    string1 = b'this is a test'
    string2 = b'wokka wokka!!!'
    print('Hamming Distance Check:', hamming_distance(string1, string2))
    ciphertext6 = get_file('6.txt')
    ciphertext6 = base64_to_bytes(ciphertext6)
    res6 = decode_repeating_byte_xor(ciphertext6)
    assert res6[0] == 'Terminator X: Bring the noise'
    print('Key:', res6[0])
    print('Plaintext:')
    print(res6[1])

    print('Task 7')
    ciphertext7 = get_file('7.txt')
    ciphertext7 = base64_to_bytes(ciphertext7)
    password = b"YELLOW SUBMARINE"
    res7 = aes_ecb_decode(ciphertext7, password).decode('ascii')
    assert res7.startswith("I'm back and I'm ringin' the bell ")
    print(res7)

    print('Task 8')
    ciphertexts8 = get_file('8.txt').split('\n')
    ciphertexts8 = [bytes.fromhex(x) for x in ciphertexts8 if x]
    res8 = detect_aes_ecb_encrypted_texts(ciphertexts8)
    assert len(res8[1]) == 1
    print('Most likely string:', bytes_to_hex(res8[1][0]))
    print('Max no. of repeats of a 16byte chunk found:', res8[0])
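Code example #54 relies on conversion helpers the excerpt does not include; a minimal sketch of plausible implementations, assuming the semantics implied by the asserts (hex_to_base64 returns base64-encoded bytes, xor is element-wise over equal-length inputs):

import base64

def hex_to_bytes(hex_str):
    return bytes.fromhex(hex_str)

def bytes_to_hex(data):
    return data.hex()

def text_to_bytes(text):
    return text.encode('ascii')

def hex_to_base64(hex_str):
    # Decode hex to raw bytes, then re-encode as base64 (returns bytes)
    return base64.b64encode(bytes.fromhex(hex_str))

def xor(x, y):
    # Element-wise XOR of two equal-length byte strings
    return bytes(a ^ b for a, b in zip(x, y))

def repeating_key_xor(data, key):
    # XOR each byte of data against the key, cycling the key as needed
    return bytes(b ^ key[i % len(key)] for i, b in enumerate(data))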
Code example #55
0
File: results.py Project: dgmiller/rattlesnake
import utils as u

filename = u.get_file()
djt = u.TwitterCorpus(filename, None)
print("\nclean_text")
djt.clean_text()
print("\ntokenize_tag")
djt.tokenize_tag()
print("\nconvert time")
djt.convert_time()
Code example #56
0
File: landsat.py Project: dkerkow/landsat-util
def main(args):
    """
    Main function - launches the program.

    :param args:
        The Parser arguments
    :type args:
        Parser object

    :returns:
        List

    :example:
        >>> ["The latitude and longitude values must be valid numbers", 1]
    """

    v = VerbosityMixin()

    if args:

        if 'clip' in args:
            bounds = convert_to_float_list(args.clip)
        else:
            bounds = None

        if args.subs == 'process':
            verbose = bool(args.verbose)
            force_unzip = bool(args.force_unzip)
            stored = process_image(args.path, args.bands, verbose, args.pansharpen, args.ndvi, force_unzip,
                                   args.ndvigrey, bounds)

            if args.upload:
                u = Uploader(args.key, args.secret, args.region)
                u.run(args.bucket, get_file(stored), stored)

            return ["The output is stored at %s" % stored]

        elif args.subs == 'search':

            try:
                if args.start:
                    args.start = reformat_date(parse(args.start))
                if args.end:
                    args.end = reformat_date(parse(args.end))
                if args.latest > 0:
                    args.limit = 25
                    end = datetime.now()
                    start = end - relativedelta(days=+365)
                    args.end = end.strftime("%Y-%m-%d")
                    args.start = start.strftime("%Y-%m-%d")
            except (TypeError, ValueError):
                return ["Your date format is incorrect. Please try again!", 1]

            s = Search()

            try:
                lat = float(args.lat) if args.lat else None
                lon = float(args.lon) if args.lon else None
            except ValueError:
                return ["The latitude and longitude values must be valid numbers", 1]

            address = args.address
            # Explicit None checks: 0.0 is a valid (but falsy) coordinate
            if address and (lat is not None and lon is not None):
                return ["Cannot specify both address and latitude-longitude", 1]

            result = s.search(paths_rows=args.pathrow,
                              lat=lat,
                              lon=lon,
                              address=address,
                              limit=args.limit,
                              start_date=args.start,
                              end_date=args.end,
                              cloud_max=args.cloud)

            if result['status'] == 'SUCCESS':
                if args.json:
                    return json.dumps(result)

                if args.latest > 0:
                    # Sort scenes by date, newest first, and keep only the latest N
                    datelist = [(item['date'], item) for item in result['results']]
                    datelist.sort(key=lambda tup: tup[0], reverse=True)
                    datelist = datelist[:args.latest]

                    result['results'] = [item for _, item in datelist]
                    result['total_returned'] = len(datelist)

                else:
                    v.output('%s items were found' % result['total'], normal=True, arrow=True)

                if result['total'] > 100:
                    return ['Over 100 results. Please narrow your search', 1]
                else:
                    v.output(json.dumps(result, sort_keys=True, indent=4), normal=True, color='green')
                return ['Search completed!']

            elif result['status'] == 'error':
                return [result['message'], 1]
        elif args.subs == 'download':
            d = Downloader(download_dir=args.dest)
            try:
                bands = convert_to_integer_list(args.bands)

                if args.process:
                    if args.pansharpen:
                        bands.append(8)

                    if args.ndvi or args.ndvigrey:
                        bands = [4, 5]

                    if not args.bands:
                        bands = [4, 3, 2]

                downloaded = d.download(args.scenes, bands)

                if args.process:
                    if not args.bands:
                        args.bands = '432'
                    force_unzip = bool(args.force_unzip)
                    for scene, src in downloaded.items():
                        if args.dest:
                            path = join(args.dest, scene)
                        else:
                            path = join(settings.DOWNLOAD_DIR, scene)

                        # Keep using Google if the image is before 2015
                        if src == 'google':
                            path = path + '.tar.bz'

                        stored = process_image(path, args.bands, False, args.pansharpen, args.ndvi, force_unzip,
                                               args.ndvigrey, bounds=bounds)

                        if args.upload:
                            try:
                                u = Uploader(args.key, args.secret, args.region)
                            except NoAuthHandlerFound:
                                return ["Could not authenticate with AWS", 1]
                            except URLError:
                                return ["Connection timeout. Probably the region parameter is incorrect", 1]
                            u.run(args.bucket, get_file(stored), stored)

                    return ['The output is stored at %s' % stored, 0]
                else:
                    return ['Download Completed', 0]
            except IncorrectSceneId:
                return ['The SceneID provided was incorrect', 1]
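main() only reads attributes off the parsed args object; a minimal argparse sketch that would satisfy the 'process' branch (flag names are inferred from the attribute accesses above, not taken from the project's actual CLI definition):

import argparse

# Hypothetical parser covering just the attributes the 'process' branch reads;
# the real landsat-util CLI wires these up elsewhere.
parser = argparse.ArgumentParser(prog='landsat')
subparsers = parser.add_subparsers(dest='subs')

process = subparsers.add_parser('process')
process.add_argument('path')
process.add_argument('--bands')
process.add_argument('--pansharpen', action='store_true')
process.add_argument('--ndvi', action='store_true')
process.add_argument('--ndvigrey', action='store_true')
process.add_argument('--verbose', action='store_true')
process.add_argument('--force-unzip', dest='force_unzip', action='store_true')
process.add_argument('--clip')
process.add_argument('--upload', action='store_true')
process.add_argument('--key')
process.add_argument('--secret')
process.add_argument('--region')
process.add_argument('--bucket')

args = parser.parse_args(['process', '/tmp/LC80030172015001LGN00.tar.bz'])
# main(args) would then dispatch on args.subs == 'process'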