Code example #1
    def execute_parsed_command(self, command, *args, **kargs):
        """Execute an already parsed command, at the server-side."""
        if not command:
            return
        start_time = time.time()
        result = self._server_proxy.execute(command, *args, **kargs)
        end_time = time.time()

        output_handler.output_printer(result)
        utils.print_elapsed_time(start_time, end_time)
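The utils.print_elapsed_time helper itself is not shown in these snippets. A minimal sketch of the two-timestamp variant used here and in examples #2 and #5 (an illustration only, not the projects' actual implementation; the output format is an assumption) could look like this:

import time

def print_elapsed_time(start_time, end_time=None):
    # Report the wall-clock time between two time.time() timestamps;
    # if end_time is omitted, measure up to the moment of the call.
    if end_time is None:
        end_time = time.time()
    minutes, seconds = divmod(end_time - start_time, 60)
    print('Elapsed time: {:.0f} min {:.2f} s'.format(minutes, seconds))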
Code example #2
def fix_plate_readings(inputFile, barcode):
    #def fix_plate_readings(inputFile):
    """
    Given a MS Excel file with a table (readings from a 386-well plate), rotates the
    well positions clockwise by the indicated degree.

    Keyword arguments:
    :param inputFile: path to MS Excel file (no default)
    :param barcode: plate barcode (no default)
    :param loessSpan: degree of rotation of table (default: 180)
    :return: Table in the original, rotated and tidy (vertical) formats in a MS Excel file
    """
    t0 = time.time()

    # Read table from MS Excel file and get file path details
    table = pd.read_excel(io=inputFile, sheetname=0, header=0, index_col=0)
    dir_name, file_name, extless_filename = utils.get_filename_from_path(
        inputFile)
    print('Read table from file:\n"{}"'.format(file_name))
    print()

    # Rotate table 180 degrees
    rotated_table = utils.rotate_table(df=table)

    # Get barcode from user
    # Added as CLI argument instead
    #print()
    #barcode = (input(Fore.RED + 'What is the plate\'s barcode: '))
    #print(Style.RESET_ALL)

    # Convert to tidy format
    tidy_table = utils.rectangular_to_tidy(df=rotated_table, barcode=barcode)

    # Write to MS Excel file
    file_name = os.path.basename(file_name)
    writer = pd.ExcelWriter(dir_name + '/' + extless_filename + '_final.xlsx')
    tidy_table.to_excel(writer, 'Final table', index=False)
    rotated_table.to_excel(writer, 'Rotated table')
    table.to_excel(writer, 'Original table')
    writer.save()

    print()
    print(Fore.CYAN +
          'Saved final tables to MS Excel file:\n"{}_final.xlsx"'.format(
              extless_filename))
    print()
    utils.print_elapsed_time(t0)
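The utils.rotate_table helper called above is not listed on this page. Assuming the plate readings are laid out as a rectangular DataFrame, a minimal sketch of a 180-degree rotation (a hypothetical stand-in for the project's helper, not its actual code) could be:

import pandas as pd

def rotate_table(df):
    # Rotate the rectangular plate layout by 180 degrees: reverse both the
    # row and the column order of the values, then reattach the original
    # labels so that the top-left well now holds the bottom-right reading.
    return pd.DataFrame(df.values[::-1, ::-1],
                        index=df.index, columns=df.columns)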
Code example #3
File: symbiotic.py  Project: chubbymaggie/symbiotic
    def perform_slicing(self):
        # run optimizations that can make slicing more precise
        opt = get_optlist_before(self.options.optlevel)
        if opt:
            self.optimize(passes=opt)

        # if this is an old slicer run, we must find the starting functions
        # (this adds the __ai_init_funs global variable to the module)
        # NOTE: must be after the optimizations that could remove it
        if self.options.old_slicer:
            self.old_slicer_find_init()

        # break the infinite loops just before slicing
        # so that the optimizations won't make them syntactically infinite again
        #self.run_opt(['-reg2mem', '-break-infinite-loops', '-remove-infinite-loops',
        self.run_opt([
            '-break-infinite-loops',
            '-remove-infinite-loops',
            # this somehow breaks the bitcode
            #'-mem2reg'
        ])

        # print info about time
        print_elapsed_time('INFO: Compilation, preparation and '\
                           'instrumentation time')

        for n in range(0, self.options.repeat_slicing):
            dbg('Slicing the code for the {0}. time'.format(n + 1))
            add_params = []
            #if n == 0 and self.options.repeat_slicing > 1:
            #    add_params = ['-pta-field-sensitive=8']

            self.slicer(self.options.slicing_criterion, add_params)

            if self.options.repeat_slicing > 1:
                opt = get_optlist_after(self.options.optlevel)
                if opt:
                    self.optimize(passes=opt)
                    self.run_opt(
                        ['-break-infinite-loops', '-remove-infinite-loops'])

        print_elapsed_time('INFO: Total slicing time')

        # the new slicer removes unused code itself, but for the old slicer
        # we must do it manually (this calls the new slicer ;)
        if self.options.old_slicer:
            self.remove_unused_only()
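The symbiotic snippets use a different calling convention: print_elapsed_time takes a message and measures the time since a reference point that restart_counting_time resets (see example #9). A minimal sketch of that convention, assuming module-level timer state (the real helpers may differ), might be:

import time

_timer_start = time.time()  # module-level reference point

def restart_counting_time():
    # start a new timing era
    global _timer_start
    _timer_start = time.time()

def print_elapsed_time(msg):
    # report the time elapsed since the last restart_counting_time() call
    print('{0}: {1:.2f} s'.format(msg, time.time() - _timer_start))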
Code example #4
File: get_sentinel2.py  Project: mazzma12/tsd
def get_time_series(aoi,
                    start_date=None,
                    end_date=None,
                    bands=['B04'],
                    out_dir='',
                    search_api='devseed',
                    product_type=None,
                    parallel_downloads=multiprocessing.cpu_count()):
    """
    Main function: crop and download a time series of Sentinel-2 images.
    """
    utils.print_elapsed_time.t0 = datetime.datetime.now()

    # list available images
    if search_api == 'devseed':
        if product_type is not None:
            print(
                "WARNING: product_type option is available only with search_api='scihub'"
            )
        images = search_devseed.search(aoi, start_date, end_date,
                                       'Sentinel-2')['results']
    elif search_api == 'scihub':
        import search_scihub
        if product_type is not None:
            product_type = 'S2MSI{}'.format(product_type[1:])
        images = search_scihub.search(aoi,
                                      start_date,
                                      end_date,
                                      satellite='Sentinel-2',
                                      product_type=product_type)
    elif search_api == 'planet':
        if product_type is not None:
            print(
                "WARNING: product_type option is available only with search_api='scihub'"
            )
        import search_planet
        images = search_planet.search(aoi,
                                      start_date,
                                      end_date,
                                      item_types=['Sentinel2L1C'])

    # sort images by acquisition date, then by mgrs id
    images.sort(
        key=lambda k: date_and_mgrs_id_from_metadata_dict(k, search_api))

    # remove duplicates (same acquisition day, different mgrs tile id)
    seen = set()
    images = [
        x for x in images if not (
            date_and_mgrs_id_from_metadata_dict(x, search_api)[0] in seen
            or  # seen.add() returns None
            seen.add(date_and_mgrs_id_from_metadata_dict(x, search_api)[0]))
    ]
    print('Found {} images'.format(len(images)))
    utils.print_elapsed_time()

    # choose whether to use http or s3
    if WE_CAN_ACCESS_AWS_THROUGH_S3:
        aws_url_from_metadata_dict = aws_s3_url_from_metadata_dict
    else:
        aws_url_from_metadata_dict = aws_http_url_from_metadata_dict

    # build urls, filenames and crops coordinates
    crops_args = []
    for img in images:
        url_base = aws_url_from_metadata_dict(img, search_api)
        name = filename_from_metadata_dict(img, search_api)
        coords = utils.utm_bbx(
            aoi,  # convert aoi coordinates to utm
            utm_zone=int(utm_zone_from_metadata_dict(img, search_api)),
            r=60)  # round to multiples of 60 (B01 resolution)
        for b in bands:
            fname = os.path.join(out_dir, '{}_band_{}.tif'.format(name, b))
            if 'MSIL2A' in title_from_metadata_dict(img, search_api):
                url = '{}/R{}m/{}.jp2'.format(url_base, band_resolution(b), b)
            else:
                url = '{}/{}.jp2'.format(url_base, b)
            crops_args.append((fname, url, *coords))

    # download crops
    utils.mkdir_p(out_dir)
    print('Downloading {} crops ({} images with {} bands)...'.format(
        len(crops_args), len(images), len(bands)),
          end=' ')
    parallel.run_calls(utils.crop_with_gdal_translate,
                       crops_args,
                       extra_args=('UInt16', ),
                       pool_type='threads',
                       nb_workers=parallel_downloads)
    utils.print_elapsed_time()

    # discard images that failed to download
    images = [
        x for x in images
        if bands_files_are_valid(x, bands, search_api, out_dir)
    ]
    # discard images that are totally covered by clouds
    utils.mkdir_p(os.path.join(out_dir, 'cloudy'))
    urls = [aws_http_url_from_metadata_dict(img, search_api) for img in images]
    print('Reading {} cloud masks...'.format(len(urls)), end=' ')
    cloudy = parallel.run_calls(
        is_image_cloudy_at_location,
        urls,
        extra_args=(utils.geojson_lonlat_to_utm(aoi), ),
        pool_type='threads',
        nb_workers=parallel_downloads,
        verbose=True)
    for img, cloud in zip(images, cloudy):
        name = filename_from_metadata_dict(img, search_api)
        if cloud:
            for b in bands:
                f = '{}_band_{}.tif'.format(name, b)
                shutil.move(os.path.join(out_dir, f),
                            os.path.join(out_dir, 'cloudy', f))
    print('{} cloudy images out of {}'.format(sum(cloudy), len(images)))
    images = [i for i, c in zip(images, cloudy) if not c]
    utils.print_elapsed_time()

    # embed some metadata in the remaining image files
    print('Embedding metadata in geotiff headers...')
    for img in images:
        name = filename_from_metadata_dict(img, search_api)
        d = format_metadata_dict(img)
        for b in bands:  # embed some metadata as gdal geotiff tags
            f = os.path.join(out_dir, '{}_band_{}.tif'.format(name, b))
            utils.set_geotif_metadata(f, metadata=d)
    utils.print_elapsed_time()
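The tsd snippets use yet another convention: the starting timestamp is stored on the function object itself (utils.print_elapsed_time.t0) and later calls take no arguments. A hedged sketch of such a helper follows; whether the real helper reports the time since t0 or since the previous call is an assumption, and the t1 attribute is invented for this illustration:

import datetime

def print_elapsed_time():
    # Report the time elapsed since the previous call (or since t0 on the
    # first call), keeping the timer state as attributes on the function
    # object itself.
    now = datetime.datetime.now()
    last = getattr(print_elapsed_time, 't1', print_elapsed_time.t0)
    print('Elapsed time: {}'.format(now - last))
    print_elapsed_time.t1 = now

# usage, matching the snippet above:
# print_elapsed_time.t0 = datetime.datetime.now()
# ... do some work ...
# print_elapsed_time()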
Code example #5
File: __main__.py  Project: ngrande/sint
args_parser = argparse.ArgumentParser(description='Search all files in a '
                                      'directory (including subdirectories) '
                                      'for a regex pattern')
args_parser.add_argument('-p', '--regex-pattern', help='regex pattern to '
                         'search for in the file(s) in the directory',
                         required=True)
args_parser.add_argument('-o', '--output-file', nargs='?', help='path to the '
                         'output file where all matching lines will be written'
                         ' to', default='matches.txt', type=str)
args_parser.add_argument('-d', '--directory', nargs='?', type=str,
                         help='directory of file(s) which will be scanned',
                         default='./')

args = args_parser.parse_args()
searcher = Searcher()
start_time = time.time()

count = 0
result = searcher.search_for_pattern(args.directory, args.regex_pattern)
with open(args.output_file, 'w+b') as file:
    for res in result:
        count += len(res)
        file.writelines(res)

end_time = time.time()
print('time elapsed:')
utils.print_elapsed_time(start_time=start_time, end_time=end_time)
print('#### found {0!s} matching line(s) ####'.format(count))
print('done!')
Code example #6
args_parser = argparse.ArgumentParser(description='Search all files in a '
                                      'directory (including subdirectories) '
                                      'for a regex pattern')
args_parser.add_argument('-p',
                         '--regex-pattern',
                         help='regex pattern to '
                         'search for in the file(s) in the directory',
                         required=True)
args_parser.add_argument('-o',
                         '--output-file',
                         nargs='?',
                         help='path to the '
                         'output file where all matching lines will be written'
                         ' to',
                         default='matches.txt',
                         type=str)
args_parser.add_argument('-d',
                         '--directory',
                         nargs='?',
                         type=str,
                         help='directory of file(s) which will be scanned',
                         default='./')

args = args_parser.parse_args()
searcher = Searcher()
start_time = time.time()

count = 0
result = searcher.search_for_pattern(args.directory, args.regex_pattern)
with open(args.output_file, 'w+b') as file:
    for res in result:
        count += len(res)
        file.writelines(res)

end_time = time.time()
print('time elapsed:')
utils.print_elapsed_time(start_time=start_time, end_time=end_time)
print('#### found {0!s} matching line(s) ####'.format(count))
print('done!')
Code example #7
def get_time_series(aoi,
                    start_date=None,
                    end_date=None,
                    bands=[8],
                    out_dir='',
                    search_api='devseed',
                    parallel_downloads=100,
                    debug=False):
    """
    Main function: crop and download a time series of Landsat-8 images.
    """
    utils.print_elapsed_time.t0 = datetime.datetime.now()

    # list available images
    seen = set()
    if search_api == 'devseed':
        images = search_devseed.search(aoi, start_date, end_date,
                                       'Landsat-8')['results']
        images.sort(key=lambda k: (k['acquisitionDate'], k['row'], k['path']))

        # remove duplicates (same acquisition day)
        images = [
            x for x in images
            if not (x['acquisitionDate'] in seen or  # seen.add() returns None
                    seen.add(x['acquisitionDate']))
        ]
    elif search_api == 'planet':
        import search_planet
        images = search_planet.search(aoi,
                                      start_date,
                                      end_date,
                                      item_types=['Landsat8L1G'])

        # sort images by acquisition date, then by acquisition row and path
        images.sort(key=lambda k: (k['properties']['acquired'], k['properties']
                                   ['wrs_row'], k['properties']['wrs_path']))

        # remove duplicates (same acquisition day)
        images = [
            x for x in images if not (x['properties']['acquired'] in seen
                                      or  # seen.add() returns None
                                      seen.add(x['properties']['acquired']))
        ]
    print('Found {} images'.format(len(images)))
    utils.print_elapsed_time()

    # build urls
    urls = parallel.run_calls(aws_urls_from_metadata_dict,
                              list(images),
                              extra_args=(search_api, ),
                              pool_type='threads',
                              nb_workers=parallel_downloads,
                              verbose=False)

    # build gdal urls and filenames
    download_urls = []
    fnames = []
    for img, bands_urls in zip(images, urls):
        name = filename_from_metadata_dict(img, search_api)
        for b in set(bands +
                     ['QA']):  # the QA band is needed for cloud detection
            download_urls += [
                s for s in bands_urls if s.endswith('B{}.TIF'.format(b))
            ]
            fnames.append(
                os.path.join(out_dir, '{}_band_{}.tif'.format(name, b)))

    # convert aoi coordinates to utm
    ulx, uly, lrx, lry, utm_zone, lat_band = utils.utm_bbx(aoi)

    # download crops
    utils.mkdir_p(out_dir)
    print('Downloading {} crops ({} images with {} bands)...'.format(
        len(download_urls), len(images),
        len(bands) + 1),
          end=' ')
    parallel.run_calls(utils.crop_with_gdal_translate,
                       list(zip(fnames, download_urls)),
                       extra_args=(ulx, uly, lrx, lry, utm_zone, lat_band),
                       pool_type='threads',
                       nb_workers=parallel_downloads)
    utils.print_elapsed_time()

    # discard images that failed to download
    images = [
        x for x in images
        if bands_files_are_valid(x, list(set(bands +
                                             ['QA'])), search_api, out_dir)
    ]
    # discard images that are totally covered by clouds
    utils.mkdir_p(os.path.join(out_dir, 'cloudy'))
    names = [filename_from_metadata_dict(img, search_api) for img in images]
    qa_names = [
        os.path.join(out_dir, '{}_band_QA.tif'.format(f)) for f in names
    ]
    cloudy = parallel.run_calls(is_image_cloudy,
                                qa_names,
                                pool_type='processes',
                                nb_workers=parallel_downloads,
                                verbose=False)
    for name, cloud in zip(names, cloudy):
        if cloud:
            for b in list(set(bands + ['QA'])):
                f = '{}_band_{}.tif'.format(name, b)
                shutil.move(os.path.join(out_dir, f),
                            os.path.join(out_dir, 'cloudy', f))
    print('{} cloudy images out of {}'.format(sum(cloudy), len(images)))
    images = [i for i, c in zip(images, cloudy) if not c]
    utils.print_elapsed_time()

    # group band crops per image
    crops = []  # list of lists: [[crop1_b1, crop1_b2 ...], [crop2_b1 ...] ...]
    for img in images:
        name = filename_from_metadata_dict(img, search_api)
        crops.append([
            os.path.join(out_dir, '{}_band_{}.tif'.format(name, b))
            for b in bands
        ])

    # embed some metadata in the remaining image files
    for img, bands_fnames in zip(images, crops):  # pair each image with its crops
        for f in bands_fnames:  # embed some metadata as gdal geotiff tags
            for k, v in metadata_from_metadata_dict(img, search_api).items():
                utils.set_geotif_metadata_item(f, k, v)
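Examples #4, #7 and #8 deduplicate the image list with the 'x in seen or seen.add(x)' comprehension trick: set.add() returns None, so the or-expression is truthy exactly when the key has been seen before, and only the first image per key survives while the original order is preserved. A standalone sketch of the idiom (unique_by is a name invented for this illustration):

def unique_by(items, key):
    # keep only the first item for each key value, preserving order;
    # set.add() returns None, so the second operand of 'or' records new
    # keys as a side effect while never making the test truthy
    seen = set()
    return [x for x in items
            if not (key(x) in seen or seen.add(key(x)))]

# e.g. images = unique_by(images, key=lambda img: img['acquisitionDate'])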
Code example #8
def get_time_series(aoi, start_date=None, end_date=None, bands=['B04'],
                    out_dir='', search_api='devseed',
                    parallel_downloads=multiprocessing.cpu_count()):
    """
    Main function: crop and download a time series of Sentinel-2 images.
    """
    utils.print_elapsed_time.t0 = datetime.datetime.now()

    # list available images
    if search_api == 'devseed':
        images = search_devseed.search(aoi, start_date, end_date,
                                       'Sentinel-2')['results']
    elif search_api == 'scihub':
        import search_scihub
        images = search_scihub.search(aoi, start_date, end_date,
                                      satellite='Sentinel-2')
    elif search_api == 'planet':
        import search_planet
        images = search_planet.search(aoi, start_date, end_date,
                                      item_types=['Sentinel2L1C'])

    # sort images by acquisition date, then by mgrs id
    images.sort(key=lambda k: date_and_mgrs_id_from_metadata_dict(k, search_api))

    # remove duplicates (same acquisition day, different mgrs tile id)
    seen = set()
    images = [x for x in images if not (date_and_mgrs_id_from_metadata_dict(x, search_api)[0] in seen
                                        or  # seen.add() returns None
                                        seen.add(date_and_mgrs_id_from_metadata_dict(x, search_api)[0]))]
    print('Found {} images'.format(len(images)))
    utils.print_elapsed_time()

    # build urls and filenames
    urls = []
    fnames = []
    for img in images:
        url = aws_url_from_metadata_dict(img, search_api)
        name = filename_from_metadata_dict(img, search_api)
        for b in bands:
            urls.append('{}{}.jp2'.format(url, b))
            fnames.append(os.path.join(out_dir, '{}_band_{}.tif'.format(name, b)))

    # convert aoi coordinates to utm
    ulx, uly, lrx, lry, utm_zone, lat_band = utils.utm_bbx(aoi)

    # download crops
    utils.mkdir_p(out_dir)
    print('Downloading {} crops ({} images with {} bands)...'.format(len(urls),
                                                                     len(images),
                                                                     len(bands)),
          end=' ')
    parallel.run_calls(utils.crop_with_gdal_translate, list(zip(fnames, urls)),
                       extra_args=(ulx, uly, lrx, lry, utm_zone, lat_band, 'UInt16'),
                       pool_type='threads', nb_workers=parallel_downloads)
    utils.print_elapsed_time()

    # discard images that failed to download
    images = [x for x in images if bands_files_are_valid(x, bands, search_api,
                                                         out_dir)]
    # discard images that are totally covered by clouds
    utils.mkdir_p(os.path.join(out_dir, 'cloudy'))
    urls = [aws_url_from_metadata_dict(img, search_api) for img in images]
    print('Reading {} cloud masks...'.format(len(urls)), end=' ')
    cloudy = parallel.run_calls(is_image_cloudy_at_location, urls,
                                extra_args=(utils.geojson_lonlat_to_utm(aoi),),
                                pool_type='threads',
                                nb_workers=parallel_downloads, verbose=True)
    for img, cloud in zip(images, cloudy):
        name = filename_from_metadata_dict(img, search_api)
        if cloud:
            for b in bands:
                f = '{}_band_{}.tif'.format(name, b)
                shutil.move(os.path.join(out_dir, f),
                            os.path.join(out_dir, 'cloudy', f))
    print('{} cloudy images out of {}'.format(sum(cloudy), len(images)))
    images = [i for i, c in zip(images, cloudy) if not c]
    utils.print_elapsed_time()

    # group band crops per image
    crops = []  # list of lists: [[crop1_b1, crop1_b2 ...], [crop2_b1 ...] ...]
    for img in images:
        name = filename_from_metadata_dict(img, search_api)
        crops.append([os.path.join(out_dir, '{}_band_{}.tif'.format(name, b))
                      for b in bands])

    # embed some metadata in the remaining image files
    for img, bands_fnames in zip(images, crops):  # pair each image with its crops
        for f in bands_fnames:  # embed some metadata as gdal geotiff tags
            for k, v in metadata_from_metadata_dict(img, search_api).items():
                utils.set_geotif_metadata_item(f, k, v)
Code example #9
File: symbiotic.py  Project: chubbymaggie/symbiotic
    def _run_symbiotic(self):
        restart_counting_time()

        # disable these optimizations, since LLVM 3.7 does
        # not have them
        self.options.disabled_optimizations = [
            '-aa',
            '-demanded-bits',  # not in 3.7
            '-globals-aa',
            '-forceattrs',  # not in 3.7
            '-inferattrs',
            '-rpo-functionattrs',  # not in 3.7
            '-tti',
            '-bdce',
            '-elim-avail-extern',  # not in 3.6
            '-float2int',
            '-loop-accesses'  # not in 3.6
        ]

        # compile all sources if the file is not given
        # as a .bc file
        if self.options.source_is_bc:
            self.llvmfile = self.sources[0]
        else:
            self._compile_sources()

        if not self.check_llvmfile(self.llvmfile, '-check-concurr'):
            dbg('Unsupported call (probably pthread API)')
            return report_results('unsupported call')

        # link the files that we got on the command line
        # and that we are required to link in under any circumstances
        self.link_unconditional()

        # remove definitions of __VERIFIER_* that are not created by us
        # and syntactically infinite loops
        passes = ['-prepare', '-remove-infinite-loops']

        memsafety = 'VALID-DEREF' in self.options.prp or \
             'VALID-FREE' in self.options.prp or \
             'VALID-MEMTRACK' in self.options.prp or \
             'MEMSAFETY' in self.options.prp
        if memsafety:
            # remove error calls, we'll put there our own
            passes.append('-remove-error-calls')
        elif 'UNDEF-BEHAVIOR' in self.options.prp or\
             'SIGNED-OVERFLOW' in self.options.prp:
            # remove the original calls to __VERIFIER_error and put new ones
            # at places where the code exhibits undefined behavior
            passes += ['-remove-error-calls', '-replace-ubsan']

        self.run_opt(passes=passes)

        # we want to link these functions before instrumentation,
        # because in those we need to check for invalid dereferences
        if memsafety:
            self.link_undefined()
            self.link_undefined()

        # now instrument the code according to properties
        self.instrument()

        passes = self._tool.prepare()
        if passes:
            self.run_opt(passes)

        # link with the rest of libraries if needed (klee-libc)
        self.link()

        # link undefined (no-op when prepare is turned off)
        # (this still can have an effect even in memsafety, since we
        # can link __VERIFIER_malloc0.c or similar)
        self.link_undefined()

        # slice the code
        if not self.options.noslice:
            self.perform_slicing()
        else:
            print_elapsed_time('INFO: Compilation, preparation and '\
                               'instrumentation time')

        # start a new time era
        restart_counting_time()

        # optimize the code after slicing and
        # before verification
        opt = get_optlist_after(self.options.optlevel)
        if opt:
            self.optimize(passes=opt)

        #FIXME: make this KLEE specific
        if not self.check_llvmfile(self.llvmfile):
            dbg('Unsupported call (probably floating handling)')
            return report_results('unsupported call')

        # there may have been created new loops
        passes = ['-remove-infinite-loops']
        passes += self._tool.prepare_after()
        self.run_opt(passes)

        # delete-undefined may insert __VERIFIER_make_symbolic
        # and also other funs like __errno_location may be included
        self.link_undefined()

        if self._linked_functions:
            print('Linked our definitions to these undefined functions:')
            for f in self._linked_functions:
                print_stdout('  ', print_nl=False)
                print_stdout(f)

        # XXX: we could optimize the code again here...
        print_elapsed_time(
            'INFO: After-slicing optimizations and preparation time')

        # tool's specific preprocessing steps
        self.preprocess_llvm()

        if self.options.final_output is not None:
            # copy the file to final_output
            try:
                os.rename(self.llvmfile, self.options.final_output)
                self.llvmfile = self.options.final_output
            except OSError as e:
                msg = 'Cannot create {0}: {1}'.format(
                    self.options.final_output, e)
                raise SymbioticException(msg)

        if not self.options.no_verification:
            print('INFO: Starting verification')
            found = self.run_verification()
        else:
            found = 'Did not run verification'

        return report_results(found)