def copy_documentation():
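    """Copy README.md from the resource directory into the release directory."""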
    documentation_path = posixpath.join(config.RESOURCE_DIR, 'README.md')
    release_documentation_path = config.RELEASE_DIR

    utility.make_directory(release_documentation_path)

    shutil.copy2(documentation_path, release_documentation_path)
Example #2
def copy_header_files(softdevice_name='s130'):
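    """Collect the serialization, override, and SoftDevice headers and copy
    them into the release driver include directory."""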
    serialization_header_path = posixpath.join(config.CODE_DIR, 'inc')
    include_override_path = posixpath.join(config.CODE_DIR, 'inc_override')
    softdevice_header_path = posixpath.join(config.NRF51_SDK_INCLUDE_DIR, 'softdevice',
                                            softdevice_name, 'headers')

    header_file_list = []

    file_list_append(header_file_list, serialization_header_path, 'sd_rpc.h')
    file_list_append(header_file_list, include_override_path, 'nrf_svc.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_hci.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_err.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_types.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_ranges.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_l2cap.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_gap.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_gatt.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_gattc.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_gatts.h')
    file_list_append(header_file_list, softdevice_header_path, 'nrf_error.h')

    release_include_path = posixpath.join(config.RELEASE_DIR, 'driver', 'include')

    utility.make_directory(release_include_path)

    for header_file_path in header_file_list:
        shutil.copy2(header_file_path, release_include_path)
Example #3
    def __init__(self, configuration):

        utility.Print('python_info',
                      '\nCreated instance of MergeHandler class')

        # Get campaign name
        self.campaigns = configuration.general.campaigns
        self.campaigns_info = configuration.campaigns.info

        # Force all regardless of already existing files
        self.force_all = configuration.general.force_all

        # Work with local files or remote
        self.work_locally = configuration.general.work_locally

        # ------ Paths -------
        self.path_main = configuration.paths.main
        self.path_logical_file_names = configuration.paths.logical_file_names
        self.path_histograms = utility.make_directory(
            configuration.paths.histograms)
        self.path_samples = configuration.paths.samples
        self.path_plots_final = utility.make_directory(
            configuration.paths.plots_final)

        # ------ Samples -------
        self.samples_info = configuration.samples.info

        # ------ Browsing options -------
        self.remote_locations = configuration.general.remote_locations

        # Merge datasets configs
        self.luminosity = configuration.general.luminosity
        self.number_of_files = configuration.general.number_of_files
        self.analyzer_module = configuration.general.analyzer_module
        self.groups = configuration.general.groups
Example #4
def create_image_info(input_file_path, output_file_path):
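    """Dump an image's EXIF tags (skipping bytes values) to a JSON file; do
    nothing if the output file already exists."""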

    if os.path.isfile(output_file_path):
        return

    print("{} -> {}".format(input_file_path, output_file_path))

    img = Image.open(input_file_path)
    exif_dict = img._getexif()

    exif_result_info = {}
    for tag_id, value in exif_dict.items():
        tag = ExifTags.TAGS.get(tag_id, tag_id)
        if isinstance(value, bytes):
            continue
        exif_result_info[tag] = value
    # print(tag_dict)
    """
    exif_result_info = {}
    for k, v in exif_dict.items():
        tag_value = jpeg_tag_dict.get(k, None)
        if tag_value:
            tag_name = tag_value[0]
            exif_result_info[tag_name] = v
    """

    utility.make_directory(output_file_path)
    write_data = json.dumps(
        exif_result_info,
        cls=JsonEncoder,
        indent="  ",
    )
    with open(output_file_path, "w") as fp:
        fp.write(write_data)
Example #5
def copy_header_files(softdevice_name='s130'):
    serialization_header_path = posixpath.join(config.CODE_DIR, 'inc')
    include_override_path = posixpath.join(config.CODE_DIR, 'inc_override')
    softdevice_header_path = posixpath.join(config.NRF51_SDK_INCLUDE_DIR,
                                            'softdevice', softdevice_name,
                                            'headers')

    header_file_list = []

    file_list_append(header_file_list, serialization_header_path, 'sd_rpc.h')
    file_list_append(header_file_list, include_override_path, 'nrf_svc.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_hci.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_err.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_types.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_ranges.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_l2cap.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_gap.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_gatt.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_gattc.h')
    file_list_append(header_file_list, softdevice_header_path, 'ble_gatts.h')
    file_list_append(header_file_list, softdevice_header_path, 'nrf_error.h')

    release_include_path = posixpath.join(config.RELEASE_DIR, 'driver',
                                          'include')

    utility.make_directory(release_include_path)

    for header_file_path in header_file_list:
        shutil.copy2(header_file_path, release_include_path)
Example #6
def copy_python_binding_release_files(softdevice_name='s130'):
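    """Copy the generated Python binding (.py and compiled module),
    ble_driver_util.py, and the examples into the release python directory."""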
    s130_bindings_release_path = posixpath.join(config.RELEASE_DIR, 'python')
    utility.make_clean_directory(s130_bindings_release_path)

    python_binding_lib_name = softdevice_name + '_' + config.LIB_NAME
    python_binding_py_name = python_binding_lib_name + '.py'
    python_binding_pyd_name = '_' + python_binding_lib_name + config.PYTHON_BINDING_SUFFIX

    python_bindings_build_path = posixpath.join(config.BUILD_DIR, 'python')
    python_binding_release_path = posixpath.join(config.RELEASE_DIR, 'python')

    utility.make_directory(python_binding_release_path)

    python_binding_py_path = posixpath.join(python_bindings_build_path,
                                            python_binding_py_name)
    python_binding_pyd_path = posixpath.join(python_bindings_build_path,
                                             python_binding_pyd_name)

    shutil.copy2(python_binding_py_path, python_binding_release_path)
    shutil.copy2(python_binding_pyd_path, python_binding_release_path)

    python_binding_path = config.BINDINGS_DIR
    python_binding_examples_path = posixpath.join(python_binding_path, 'examples')
    python_binding_examples_release_path = posixpath.join(python_binding_release_path, 'examples')

    python_bindings_util_path = posixpath.join(python_binding_path, 'src', 'ble_driver_util.py')
    shutil.copy2(python_bindings_util_path, python_binding_release_path)
    shutil.copytree(python_binding_examples_path, python_binding_examples_release_path)
Example #7
def combine_file(combine_file_path, pic_file_path_0, pic_file_path_1):
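    """Blend two same-sized pictures with a mask selected by aspect ratio and
    save the composite image."""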
    def _get_mask_image(target_image_size):
        for ref_image_size in ((1200, 800), (1124, 800), (800, 1154),
                               (800, 1200), (1116, 800), (1187, 800),
                               (1167, 800)):
            if abs((target_image_size[0] / target_image_size[1]) -
                   (ref_image_size[0] / ref_image_size[1])) < 0.02:
                mask_file_name = "mask_{}_{}.bmp".format(*ref_image_size)
                mask_image = Image.open(
                    os.path.join(settings.MASK_DIRECTORY_PATH, mask_file_name)
                ).convert("1").resize(target_image_size)
                return mask_image

        return None

    image_0 = Image.open(pic_file_path_0)
    image_1 = Image.open(pic_file_path_1)

    mask_image = _get_mask_image(image_0.size)

    print(pic_file_path_0, image_0.size)

    if mask_image is not None and image_0.size == image_1.size == mask_image.size:
        out_image = Image.composite(image_1, image_0, mask_image)

        print(combine_file_path, out_image.size)
        utility.make_directory(combine_file_path)
        out_image.save(combine_file_path)
Example #8
def copy_hex_files(softdevice_name='s130'):
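    """Copy the connectivity firmware hex files for the given SoftDevice into
    the release firmware directory."""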
    hex_file_list = []

    version = {
        's110': '7.1.0',
        's120': '1.0.1',
        's130': '1.0.0'
    }[softdevice_name]

    hex_resource_path = config.RESOURCE_DIR

    hex_file_name = 'connectivity_115k2_with_{0}_{1}.hex'.format(
        softdevice_name, version)
    file_list_append(hex_file_list, hex_resource_path, hex_file_name)

    if config.PLATFORM_SYSTEM != 'Darwin':
        hex_file_name = 'connectivity_1m_with_{0}_{1}.hex'.format(
            softdevice_name, version)
        file_list_append(hex_file_list, hex_resource_path, hex_file_name)

    release_hex_path = posixpath.join(config.RELEASE_DIR, 'firmware')

    utility.make_directory(release_hex_path)

    for hex_file_path in hex_file_list:
        shutil.copy2(hex_file_path, release_hex_path)
Example #9
def create_movie_small(size, bv, ba, input_file_path, output_file_path):
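    """Re-encode a movie with ffmpeg (libx264/AAC) scaled to the given width
    and bitrates, skipping outputs that already exist."""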

    if os.path.isfile(output_file_path):
        return

    utility.make_directory(output_file_path)

    command = (
        cf.FFMPEG_PATH,
        "-y",
        "-i",
        input_file_path,
        "-movflags",
        "faststart",
        "-c:v",
        "libx264",  # libx265 # libx264 # mpeg2 # libxvid
        "-vf",
        "scale={}:-1".format(size),
        "-b:v",
        bv,
        "-c:a",
        "aac",
        # "-acodec aac -strict experimental"
        "-b:a",
        ba,
        output_file_path)
    print(command)

    def call_back(returncode, stdout_message):
        print(returncode, stdout_message)

    utility.create_process(command, call_back, 1, 2400)()
Example #10
File: FileTool.py  Project: ferencek/BTV
    def __init__(self, configuration):

        utility.Print('python_info', '\nCreated instance of FileTool class')

        # ------ Paths -------
        self.path_main = configuration.paths.main
        self.path_samples = utility.make_directory(configuration.paths.samples)
        self.path_logical_file_names = utility.make_directory(
            configuration.paths.logical_file_names)
        self.path_batch_results = utility.make_directory(
            configuration.paths.batch_results)
        self.path_batch_templates = utility.make_directory(
            configuration.paths.batch_templates)
        self.path_proxy = configuration.paths.proxy

        # ------ Batch options -------
        self.batch_type = configuration.general.batch_type
        self.number_of_jobs = configuration.general.number_of_jobs
        self.send_jobs = configuration.general.send_jobs
        self.batch_templates = configuration.general.batch_templates

        # ------ Samples -------
        self.samples_info = configuration.samples.info
        self.samples = self.samples_info.keys()

        # ------ Browsing options -------
        self.remote_locations = configuration.general.remote_locations
        self.search_keywords = configuration.general.search_keywords
Example #11
def download_pic(opener, eventno, url, file_path):
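    """Download a picture with the required Referer and User-Agent headers and
    write it to file_path."""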

    print("url-> ", url)
    print("file_path-> ", file_path)

    if opener is None:
        return

    req = urllib.request.Request(url)
    referer_url = '{}/?action_user_FastViewer=t&eventno={}'.format(
        settings.URL_ROOT, eventno)
    req.add_header('Referer', referer_url)
    req.add_header('User-Agent', settings.HTTP_HEADER_USER_AGENT)
    req.add_header('accept-encoding', 'gzip, deflate, br')
    req.add_header(
        'accept',
        'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3'
    )
    req.add_header('cache-control', 'max-age=0')
    # req.add_header('cookie', JPEG_COOKIE)
    jpg = opener.open(req).read()

    utility.make_directory(file_path)
    with open(file_path, mode="wb") as f:
        f.write(jpg)
Example #12
def copy_python_binding_release_files(softdevice_name='s130'):
    s130_bindings_release_path = posixpath.join(config.RELEASE_DIR, 'python')
    utility.make_clean_directory(s130_bindings_release_path)

    python_binding_lib_name = softdevice_name + '_' + config.LIB_NAME
    python_binding_py_name = python_binding_lib_name + '.py'
    python_binding_pyd_name = '_' + python_binding_lib_name + config.PYTHON_BINDING_SUFFIX

    python_bindings_build_path = posixpath.join(config.BUILD_DIR, 'python')
    python_binding_release_path = posixpath.join(config.RELEASE_DIR, 'python')

    utility.make_directory(python_binding_release_path)

    python_binding_py_path = posixpath.join(python_bindings_build_path,
                                            python_binding_py_name)
    python_binding_pyd_path = posixpath.join(python_bindings_build_path,
                                             python_binding_pyd_name)

    shutil.copy2(python_binding_py_path, python_binding_release_path)
    shutil.copy2(python_binding_pyd_path, python_binding_release_path)

    python_binding_path = config.BINDINGS_DIR
    python_binding_examples_path = posixpath.join(python_binding_path,
                                                  'examples')
    python_binding_examples_release_path = posixpath.join(
        python_binding_release_path, 'examples')

    python_bindings_util_path = posixpath.join(python_binding_path, 'src',
                                               'ble_driver_util.py')
    shutil.copy2(python_bindings_util_path, python_binding_release_path)
    shutil.copytree(python_binding_examples_path,
                    python_binding_examples_release_path)
Example #13
def copy_documentation():
    documentation_path = posixpath.join(config.RESOURCE_DIR, 'README.md')
    release_documentation_path = config.RELEASE_DIR

    utility.make_directory(release_documentation_path)

    shutil.copy2(documentation_path, release_documentation_path)
Example #14
def copy_serialization_dll(softdevice_name='s130'):
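    """Copy the built serialization shared library into the release driver lib directory."""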
    dynamic_library_name = config.DYNAMIC_LIB_PREFIX + softdevice_name + '_' + config.LIB_NAME
    dynamic_library_file = dynamic_library_name + config.DYNAMIC_LIB_SUFFIX
    dynamic_library_path = posixpath.join(config.SERIALIZATION_BUILD_DIR,
                                          dynamic_library_file)
    dynamic_library_dest_path = posixpath.join(config.RELEASE_DIR, "driver", "lib")
    utility.make_directory(dynamic_library_dest_path)
    shutil.copy2(dynamic_library_path, dynamic_library_dest_path)
Example #15
def build():
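    """Configure the driver and binding artifacts with CMake and build them with make."""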
    logger.info('Building serialization dll artifacts with CMake')

    sdk_info = Sdk(config.ARTIFACTS_ROOT, config.SDK_VERSION)

    utility.make_directory(config.SERIALIZATION_BUILD_DIR)

    if config.PLATFORM_SYSTEM == 'Windows':
        generator = 'MinGW Makefiles'
    elif config.PLATFORM_SYSTEM in ['Linux', 'Darwin']:
        generator = 'Unix Makefiles'
    else:
        raise SystemError('Unknown platform. Not able to determine generator.')

    cmake_environment = None

    # Remove any git/bin path in environment variable PATH
    if config.PLATFORM_SYSTEM == 'Windows':
        environment_path = os.environ['PATH']
        environment_path_list = environment_path.split(';')
        environment_path_list = [path for path in environment_path_list if 'Git\\bin' not in path]
        environment_path = ';'.join(environment_path_list)

        cmake_environment = copy.copy(os.environ)
        cmake_environment['PATH'] = environment_path

    for artifact in ['driver', 'binding']:
        cmake_args = ['cmake', '-G', '{0}'.format(generator),
                      '-DNRF51_SDK_PATH={0}'.format(sdk_info.path),
                      '-DSERIALIZATION_VERSION={0}'.format(config.VERSION),
                      '-DSERIALIZATION_REVISION={0}'.format(config.REVISION),
                      '-DARTIFACT={0}'.format(artifact),
                      config.REPO_ROOT_DIR]

        logging.debug("Starting to build with command: %s", " ".join(cmake_args))

        return_code = None

        try:
            return_code = subprocess.call(cmake_args,
                                          shell=False,
                                          env=cmake_environment)

            if return_code != 0:
                err_msg = 'Failed to prepare build of {0} libraries. Error code: {1}.'.format(artifact, return_code)
                utility.add_log_message(err_msg)
                raise SystemError(err_msg)

            return_code = subprocess.call([config.PLATFORM_MAKE], shell=True)

            if return_code != 0:
                err_msg = 'Failed to build artifact {0}. Error code: {1}.'.format(artifact, return_code)
                utility.add_log_message(err_msg)
                raise SystemError(err_msg)
        except Exception as e:
            logger.fatal(e)
            return return_code
Example #16
def copy_serialization_dll(softdevice_name='s130'):
    dynamic_library_name = config.DYNAMIC_LIB_PREFIX + softdevice_name + '_' + config.LIB_NAME
    dynamic_library_file = dynamic_library_name + config.DYNAMIC_LIB_SUFFIX
    dynamic_library_path = posixpath.join(config.SERIALIZATION_BUILD_DIR,
                                          dynamic_library_file)
    dynamic_library_dest_path = posixpath.join(config.RELEASE_DIR, "driver",
                                               "lib")
    utility.make_directory(dynamic_library_dest_path)
    shutil.copy2(dynamic_library_path, dynamic_library_dest_path)
Example #17
def copy_windows_lib(softdevice_name='s130'):
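    """Copy the Windows import library into the release driver lib directory."""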
    import_library_name = softdevice_name + '_' + config.LIB_NAME + config.IMPORT_LIB_SUFFIX
    import_library_path = posixpath.join(config.BUILD_DIR, 'driver',
                                         import_library_name)

    release_import_library_path = posixpath.join(config.RELEASE_DIR, 'driver', 'lib')

    utility.make_directory(release_import_library_path)

    shutil.copy2(import_library_path, release_import_library_path)
Example #18
def copy_windows_lib(softdevice_name='s130'):
    import_library_name = softdevice_name + '_' + config.LIB_NAME + config.IMPORT_LIB_SUFFIX
    import_library_path = posixpath.join(config.BUILD_DIR, 'driver',
                                         import_library_name)

    release_import_library_path = posixpath.join(config.RELEASE_DIR, 'driver',
                                                 'lib')

    utility.make_directory(release_import_library_path)

    shutil.copy2(import_library_path, release_import_library_path)
Example #19
File: execute.py  Project: nkinta/picture
def write_jpeg(app_segment_data, jpeg_data_list, output_file_path):
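    """Write a JPEG file: the SOI marker, the APP segment header from
    app_segment_data, then the given JPEG data blocks."""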
    # ep = ">"
    byte_order = 0x4d4d
    ep = ENDIAN_TYPE[byte_order]

    utility.make_directory(output_file_path)
    with open(output_file_path, "wb") as f:
        data = struct.pack("%sH" % ep, SOI_TAG)
        f.write(data)
        app_segment_data.write_segment_header(f, ep, byte_order)
        for jpeg_data in jpeg_data_list:
            f.write(jpeg_data)
Example #20
def create_image_middle_jpg(input_file_path, output_file_path):
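    """Resize an image to 800x800 and save it, skipping outputs that already exist."""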

    if os.path.isfile(output_file_path):
        return

    print("{} -> {}".format(input_file_path, output_file_path))

    img = Image.open(input_file_path)
    img = img.resize((800, 800), Image.ANTIALIAS)

    utility.make_directory(output_file_path)
    img.save(output_file_path)
Example #21
def create_image_small_jpg(input_file_path, output_file_path):
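    """Create a 160x160 thumbnail of an image and save it, skipping outputs that already exist."""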

    if os.path.isfile(output_file_path):
        return

    print("{} -> {}".format(input_file_path, output_file_path))

    img = Image.open(input_file_path)
    img.thumbnail((160, 160), Image.ANTIALIAS)

    utility.make_directory(output_file_path)
    img.save(output_file_path)
Example #22
File: main.py  Project: nkinta/picture
def execute_ffmpeg(url, output_file_path):
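    """Save the stream at `url` with ffmpeg using stream copy and the
    aac_adtstoasc bitstream filter, skipping outputs that already exist."""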

    if os.path.isfile(output_file_path):
        return

    utility.make_directory(output_file_path)

    command = (settings.FFMPEG_PATH, "-i", url, "-y", "-movflags", "faststart",
               "-c", "copy", "-bsf:a", "aac_adtstoasc", output_file_path)
    print(command)

    def call_back(returncode, stdout_message):
        print(returncode, stdout_message)

    utility.create_process(command, call_back, 1, 2400)()
Example #23
File: trace.py  Project: Q-KIM/PyClone
    def open(self):
        make_directory(self.trace_dir)

        self.alpha_writer = ConcentrationParameterWriter(self.trace_dir)

        self.labels_writer = LabelsWriter(self.trace_dir, self.mutation_ids)

        self.cellular_frequency_writers = {}

        for sample_id in self.sample_ids:
            self.cellular_frequency_writers[sample_id] = CellularFrequenciesWriter(
                self.trace_dir, sample_id, self.mutation_ids)

        if self.update_precision:
            self.precision_writer = PrecisionWriter(self.trace_dir)
Example #24
File: trace.py  Project: Q-KIM/PyClone
    def open(self):
        make_directory(self.trace_dir)

        self.alpha_writer = ConcentrationParameterWriter(self.trace_dir)

        self.labels_writer = LabelsWriter(self.trace_dir, self.mutation_ids)

        self.cellular_frequency_writers = {}

        for sample_id in self.sample_ids:
            self.cellular_frequency_writers[
                sample_id] = CellularFrequenciesWriter(self.trace_dir,
                                                       sample_id,
                                                       self.mutation_ids)

        if self.update_precision:
            self.precision_writer = PrecisionWriter(self.trace_dir)
Example #25
File: main.py  Project: nkinta/picture
def get_iframe_html(opener, url, session_id):
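    """Fetch the player iframe, save its HTML, extract the video source URL,
    rewrite it to the hd_2000k playlist, and pass it to execute_ffmpeg."""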

    referer_url = settings.URL_ROOT

    req = urllib.request.Request(url)
    req.add_header('Referer', referer_url)
    req.add_header('accept', 'application/json, text/javascript, */*; q=0.01')
    req.add_header('accept-language', settings.HTTP_HEADER_LANGUAGE)
    req.add_header('User-Agent', settings.HTTP_HEADER_USER_AGENT)

    res = opener.open(req)

    file_path = os.path.join(settings.PICTURE_TEMP_PATH,
                             "{}_iframe.html".format(session_id))

    res_data = res.read()

    utility.make_directory(file_path)
    with open(file_path, mode="wb") as f:
        f.write(res_data)

    soup = BeautifulSoup(res_data, "html.parser")
    video_element = soup.find("video")
    if video_element is None:
        print("video element not found -> id:{}".format(session_id))
        return

    movie_url_element = video_element.find("source")
    if movie_url_element is None:
        print("video source element not found -> id:{}".format(session_id))
        return

    movie_url_value = movie_url_element.get("src")
    print(movie_url_value)

    parse_result = urllib.parse.urlparse(movie_url_value)
    scheme, netloc, url_path, _, _, _ = parse_result

    hd_2000k_url = urllib.parse.urlunparse(
        (scheme, netloc, url_path.replace("index.m3u8", "hd_2000k_.m3u8"),
         None, None, None))

    file_path = os.path.join(settings.PICTURE_TEMP_PATH, "strage",
                             "{}.mp4".format(session_id))

    execute_ffmpeg(hd_2000k_url, file_path)
Example #26
File: main.py  Project: nkinta/picture
def combine_file(file_path_list, target_file_path, mergin, v_or_h,
                 target_override_date):
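    """Stack the images in file_path_list vertically or horizontally,
    overlapping consecutive images by `mergin` pixels, then save the combined
    image and apply target_override_date to it."""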

    if v_or_h == COMBINE_V:
        height_name = "height"
        width_name = "width"
    elif v_or_h == COMBINE_H:
        height_name = "width"
        width_name = "height"

    def calc_total_height(height_name, width_name):
        total_height = 0
        width = getattr(im_list[0], width_name)
        for im in im_list:
            total_height = total_height + getattr(im, height_name)
            if width != getattr(im, width_name):
                raise Error("width difference {} != {}".format(
                    width, getattr(im, width_name)))
        return total_height, width

    im_list = []
    for file_path in file_path_list:
        im_list.append(Image.open(file_path))

    mergin_total = mergin * (len(file_path_list) - 1)

    total_height, width = calc_total_height(height_name, width_name)

    if v_or_h == COMBINE_V:
        target_image = Image.new("RGB", (width, total_height - mergin_total))
    elif v_or_h == COMBINE_H:
        target_image = Image.new("RGB", (total_height - mergin_total, width))

    current_height = 0
    for im in im_list:
        if v_or_h == COMBINE_V:
            target_image.paste(im, (0, current_height))
        elif v_or_h == COMBINE_H:
            target_image.paste(im, (current_height, 0))

        current_height = current_height + getattr(im, height_name) - mergin

    utility.make_directory(target_file_path)
    target_image.save(target_file_path)
    override_date(target_file_path, target_override_date)
Example #27
    def __init__(self, configuration):

        utility.Print('python_info',
                      '\nCreated instance of HistogramHandler class')

        # Get campaign name
        self.campaigns = configuration.general.campaigns
        self.campaigns_info = configuration.campaigns.info

        # Force all regardless of already existing files
        self.force_all = configuration.general.force_all

        # Work with local files or remote
        self.work_locally = configuration.general.work_locally

        # ------ Paths -------
        self.path_main = configuration.paths.main
        self.path_samples = configuration.paths.samples
        self.path_logical_file_names = configuration.paths.logical_file_names
        self.path_batch_results = utility.make_directory(
            configuration.paths.batch_results, 'histograms')
        self.path_batch_templates = utility.make_directory(
            configuration.paths.batch_templates, 'histograms')
        self.path_histograms = utility.make_directory(
            configuration.paths.histograms)
        self.path_proxy = configuration.paths.proxy

        # ------ Batch options -------
        self.batch_type = configuration.general.batch_type
        self.number_of_jobs = configuration.general.number_of_jobs
        self.number_of_files = configuration.general.number_of_files
        self.send_jobs = configuration.general.send_jobs
        self.batch_templates = configuration.general.batch_templates[
            'histogram']
        self.queue_lxbatch = configuration.general.queue_lxbatch

        # ------ Samples -------
        self.samples_info = configuration.samples.info

        # ---- btag configuration ----
        self.config_btagvalidation = configuration.btagvalidation.parameters

        # ------ Browsing options -------
        self.remote_locations = configuration.general.remote_locations
Example #28
def create_movie_thumbnail(input_file_path, output_file_path):
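    """Extract thumbnail images from a movie with ffmpeg (one frame every 10
    seconds, scaled to 480 px wide), skipping outputs that already exist."""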

    if not os.path.isdir(os.path.dirname(output_file_path)):
        utility.make_directory(output_file_path)

    if os.path.isfile(output_file_path):
        return

    command = (cf.FFMPEG_PATH, "-y", "-i", input_file_path, "-vf",
               "fps=1/10,scale=480:-1", output_file_path)
    print(command)

    def call_back(returncode, stdout_message):
        print(returncode, stdout_message)

    utility.create_process(command, call_back, 1, 20)()
Example #29
    def __init__(self,
                 path_templates,
                 templates,
                 arguments,
                 batch,
                 queue=None):

        self.batch = batch
        self.path_templates = path_templates
        self.templates = templates
        self.arguments = arguments
        self.queue = queue

        # Add an additional layer of directories for each job
        _new_path_batch = self.arguments['<path_batch_file_wo_ext>']
        self.arguments['<path_batch_file_wo_ext>'] = _new_path_batch.replace(
            self.arguments['<path_batch>'], _new_path_batch)
        self.arguments['<path_batch>'] = _new_path_batch

        # make batch directory
        utility.make_directory(self.arguments['<path_batch>'])
Example #30
def _create_info_file(root_path, json_encoder, file_info_list_by_date, cls):
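    """Write JSON info files: a date index at root_path, per-day file lists,
    and a folder list for every directory that has subdirectories."""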

    output_file_path = os.path.join(root_path, INFO_FILE_NAME)

    utility.make_directory(output_file_path)
    date_list = [FolderInfo(v) for v, _ in file_info_list_by_date.items()]
    write_data = json.dumps(
        date_list,
        cls=json_encoder,
        indent="  ",
    )
    with open(output_file_path, "w") as fp:
        fp.write(write_data)

    for one_day, file_info_list in file_info_list_by_date.items():

        output_file_path = os.path.join(root_path, one_day,
                                        cls.get_directory_name(),
                                        INFO_FILE_NAME)
        utility.make_directory(output_file_path)
        write_data = json.dumps(
            file_info_list,
            cls=json_encoder,
            indent="  ",
        )
        with open(output_file_path, "w") as fp:
            fp.write(write_data)

    for root, dirs, files in os.walk(root_path):
        if not dirs:
            continue
        folder_list = [FolderInfo(os.path.basename(v)) for v in dirs]
        write_data = json.dumps(
            folder_list,
            cls=json_encoder,
            indent="  ",
        )
        with open(os.path.join(root, INFO_FILE_NAME), "w") as fp:
            fp.write(write_data)
Example #31
File: main.py  Project: nkinta/picture
def login(login_id, password, local_test_flag, do_login_flag, root_path):
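    """Build a cookie-aware URL opener (creating cookie.txt if needed),
    optionally perform a login, and return the opener; returns None in local
    test mode."""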

    cookie_file_path = os.path.join(root_path, "cookie.txt")

    if local_test_flag:
        return None

    utility.make_directory(cookie_file_path)
    if not os.path.isfile(cookie_file_path):
        with open(cookie_file_path, "w") as fp:
            fp.write("# Netscape HTTP Cookie File")
    cookie_jar = http.cookiejar.MozillaCookieJar(cookie_file_path)
    cookie_jar.load()

    cookie_processor = urllib.request.HTTPCookieProcessor(cookie_jar)
    opener = urllib.request.build_opener(cookie_processor)

    if do_login_flag:
        phrase = get_root_html(opener, root_path)
        login_main(opener, login_id, password, phrase)

    return opener
Example #32
def copy_hex_files(softdevice_name='s130'):
    hex_file_list = []

    version = {
        's110': '7.1.0',
        's120': '1.0.1',
        's130': '1.0.0'}[softdevice_name]

    hex_resource_path = config.RESOURCE_DIR

    hex_file_name = 'connectivity_115k2_with_{0}_{1}.hex'.format(softdevice_name, version)
    file_list_append(hex_file_list, hex_resource_path, hex_file_name)

    if config.PLATFORM_SYSTEM != 'Darwin':
        hex_file_name = 'connectivity_1m_with_{0}_{1}.hex'.format(softdevice_name, version)
        file_list_append(hex_file_list, hex_resource_path, hex_file_name)

    release_hex_path = posixpath.join(config.RELEASE_DIR, 'firmware')

    utility.make_directory(release_hex_path)

    for hex_file_path in hex_file_list:
        shutil.copy2(hex_file_path, release_hex_path)
Example #33
File: main.py  Project: nkinta/picture
def get_session_html(opener, url):
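    """Fetch a session page, save its HTML, locate the embedded player iframe,
    and hand its URL to get_iframe_html."""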

    referer_url = settings.URL_ROOT

    req = urllib.request.Request(url)
    req.add_header('Referer', referer_url)
    req.add_header('accept', 'application/json, text/javascript, */*; q=0.01')
    req.add_header('accept-language', settings.HTTP_HEADER_LANGUAGE)
    req.add_header('User-Agent', settings.HTTP_HEADER_USER_AGENT)

    res = opener.open(req)

    session_id = url.split(r"/")[-1]

    file_path = os.path.join(settings.PICTURE_TEMP_PATH,
                             "{}.html".format(session_id))

    res_data = res.read()

    utility.make_directory(file_path)
    with open(file_path, mode="wb") as f:
        f.write(res_data)

    soup = BeautifulSoup(res_data, "html.parser")

    iframe_url_element = soup.find("iframe",
                                   {"class": "frame embed-responsive-item"})
    if iframe_url_element is None:
        print("iframe not found -> id:{}".format(session_id))
        return

    iframe_url_value = iframe_url_element.get("src")

    # print(iframe_url_value)

    get_iframe_html(opener, iframe_url_value, session_id)
Example #34
    #PBS -S /bin/bash
    source /home/jswinban/sw/init.sh
    source /home/jswinban/sw/lofim/lofarinit.sh
    cd %s
    time python /home/jswinban/imaging/imaging-multibeam.py %s
"""
TEMPLATE_JOB = textwrap.dedent(TEMPLATE_JOB).strip()

if __name__ == "__main__":
    target_obsid = sys.argv[1]
    cal_obsid = sys.argv[2]
    template_parset = sys.argv[3]

    CAL_OUTPUT = os.path.join(OUTPUT_DIR, "calibrator", cal_obsid)
    TARGET_OUTPUT = os.path.join(OUTPUT_DIR, "target", target_obsid)
    make_directory(CAL_OUTPUT)
    make_directory(TARGET_OUTPUT)

    # Check data exists: we should have sum(BAND_SIZE) subbands in each beam,
    # N_BEAMS beams per target_obsid, and 1 beam per cal_obsid.
    # We write the validated data to input files for the imaging pipeline.
    ms_list = sorted_ms_list(os.path.join(INPUT_DIR, cal_obsid))[:sum(BAND_SIZE)]
    assert(len(ms_list) == sum(BAND_SIZE))
    with open(os.path.join(TARGET_OUTPUT, "cal_ms_list"), 'w') as f:
        for ms in ms_list:
            f.write("%s\n" % ms)

    ms_list = sorted_ms_list(os.path.join(INPUT_DIR, target_obsid))[:sum(BAND_SIZE)*N_BEAMS]
    assert(len(ms_list) == sum(BAND_SIZE) * N_BEAMS)
    with open(os.path.join(TARGET_OUTPUT, "target_ms_list"), 'w') as f:
        for ms in ms_list:
Example #35
    # Our single command line argument is a parset containing all
    # configuration information we'll need.
    input_parset = lofar.parameterset.parameterset(sys.argv[1])

    # We require `sbs_per_beam` input MeasurementSets for each beam, including
    # the calibrator.
    sbs_per_beam = sum(input_parset.getIntVector("band_size"))

    print "Locating calibrator data and checking paths"
    ms_cal = {}
    ms_cal["datafiles"] = read_ms_list(input_parset.getString("cal_ms_list"))
    assert len(ms_cal["datafiles"]) == sbs_per_beam
    ms_cal["output_dir"] = os.path.join(
        input_parset.getString("output_dir"), "calibrator", input_parset.getString("cal_obsid")
    )
    make_directory(ms_cal["output_dir"])

    print "Copying calibrator subbands to output"
    ms_cal["datafiles"] = copy_to_work_area(ms_cal["datafiles"], ms_cal["output_dir"])

    print "Locating target data and checking paths"
    # ms_target will be a dict that provides all the information we need to
    # process each independent element of the observation, where an "element"
    # is a combination of a beam (SAP) and a band (number of subbands)
    ms_target = {}

    target_mss = read_ms_list(input_parset.getString("target_ms_list"))
    assert len(target_mss) == input_parset.getInt("n_beams") * sbs_per_beam

    for beam, data in enumerate(zip(*[iter(target_mss)] * sbs_per_beam)):
        start_sb = 0
Example #36
File: FileTool.py  Project: ferencek/BTV
    def copy_files_all_samples_locally(self):
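        '''Create batch jobs that copy missing sample files from remote
        storage and optionally submit them.'''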

        utility.Print('python_info',
                      '\nCalled copy_files_all_samples_locally function.')

        _batch_templates_copy = self.batch_templates['copy']
        _path_batch_results_copy = utility.make_directory(
            self.path_batch_results, 'copy')
        _path_batch_templates_copy = utility.make_directory(
            self.path_batch_templates, 'copy')

        # Loop over samples
        for _s in self.samples:

            utility.Print('status', '\nSample: {0}'.format(_s))

            # Loop over all remote locations
            for _l in self.remote_locations['path']:

                # Loop over all subsamples
                for _ss in self.samples_info[_s]['subsample'].values():

                    _file_missing = os.path.join(
                        self.path_logical_file_names, 'local', _l, _s,
                        _ss.replace('/', '__') + '_missing.txt')
                    _file_missing = open(_file_missing, 'r')

                    for _n, _f in enumerate(_file_missing):

                        _f = _f.rstrip()

                        # Number of jobs handler
                        if _n >= self.number_of_jobs and self.number_of_jobs != -1:
                            continue

                        if os.path.isfile(_f):
                            utility.Print(
                                'python_info',
                                'File {0} already exists.'.format(_f))
                            continue

                        # To create batch jobs, one needs to edit the template script, which is done below
                        _path_remote_samples = os.path.join(
                            self.remote_locations['storage_element'][_l],
                            self.remote_locations['path'][_l])

                        _x = _f.split(
                            '/')  # split the input file path on '/'
                        _source = _f.replace(
                            self.path_samples,
                            _path_remote_samples)  # place from where to copy
                        _destination = '/'.join(_x[:-1])  # path where to copy
                        _batch_file_wo_ext = _x[-1].replace(
                            '.root', '')  # file name without any extension
                        _path_batch = _destination.replace(
                            self.path_samples, _path_batch_results_copy
                        )  # path to newly created batch script
                        _path_batch_file_wo_ext = os.path.join(
                            _path_batch, _batch_file_wo_ext
                        )  # path and name of the script without any extension
                        _protocol = self.remote_locations['protocol'][
                            _l]  # protocol used for copying
                        _path_proxy = self.path_proxy  # one needs proxy file to give permission to condor
                        _file_proxy = self.path_proxy.split('/')[-1]

                        _batch_arguments = {
                            '<path_batch>':
                            _path_batch,
                            '<path_batch_file_wo_ext>':
                            _path_batch_file_wo_ext,
                            '<source>':
                            _source,
                            '<destination>':
                            _destination,
                            '<protocol>':
                            _protocol,
                            '<X509_USER_PROXY_path>':
                            _path_proxy,
                            '<X509_USER_PROXY_file>':
                            _file_proxy,
                            '<SCRAM_ARCH>':
                            os.environ['SCRAM_ARCH'],
                            '<CMSSW_dir>':
                            os.path.join(os.environ['CMSSW_BASE'], 'src'),
                        }

                        # Create batch tool instance
                        _batch = utility.BatchTool(_path_batch_templates_copy,
                                                   _batch_templates_copy,
                                                   _batch_arguments,
                                                   self.batch_type)
                        # Make scripts first
                        _batch.make_scripts()
                        # Send jobs
                        if self.send_jobs:
                            _batch.send_job()

                    _file_missing.close()
Example #37
File: FileTool.py  Project: ferencek/BTV
    def check_missing_files_all_samples_locally(self):
        '''Loop over the missing files and remove any existing file that has errors.'''

        utility.Print(
            'python_info',
            '\nCalled check_missing_files_all_samples_locally function.')

        _batch_templates_check = self.batch_templates['check']
        _path_batch_results_check = utility.make_directory(
            self.path_batch_results, 'check')
        _path_batch_templates_check = utility.make_directory(
            self.path_batch_templates, 'check')

        # Loop over samples
        for _s in self.samples:

            utility.Print('status', '\nSample: {0}'.format(_s))

            # Loop over all remote locations
            for _l in self.remote_locations['path']:

                # Loop over all subsamples
                for _ss in self.samples_info[_s]['subsample'].values():

                    _file_missing = os.path.join(
                        self.path_logical_file_names, 'local', _l, _s,
                        _ss.replace('/', '__') + '_missing.txt')
                    _file_missing = open(_file_missing, 'r')

                    for _f in _file_missing:

                        _f = _f.rstrip()

                        # Send check files on batch
                        if self.send_jobs:

                            _x = _f.split(
                                '/')  # split the input file path on '/'
                            _batch_file_wo_ext = _x[-1].replace(
                                '.root', '')  # file name without any extension
                            _destination = '/'.join(_x[:-1])
                            _path_batch = _destination.replace(
                                self.path_samples, _path_batch_results_check
                            )  # path to newly created batch script
                            _path_batch_file_wo_ext = os.path.join(
                                _path_batch, _batch_file_wo_ext
                            )  # path and name of the script without any extension
                            _path_proxy = self.path_proxy  # one needs proxy file to give permission to condor
                            _file_proxy = self.path_proxy.split('/')[-1]

                            _batch_arguments = {
                                '<path_batch>':
                                _path_batch,
                                '<path_batch_file_wo_ext>':
                                _path_batch_file_wo_ext,
                                '<file_name>':
                                _f,
                                '<X509_USER_PROXY_path>':
                                _path_proxy,
                                '<X509_USER_PROXY_file>':
                                _file_proxy,
                                '<SCRAM_ARCH>':
                                os.environ['SCRAM_ARCH'],
                                '<CMSSW_dir>':
                                os.path.join(os.environ['CMSSW_BASE'], 'src'),
                            }

                            # Create batch tool instance
                            _batch = utility.BatchTool(
                                _path_batch_templates_check,
                                _batch_templates_check, _batch_arguments,
                                self.batch_type)
                            # Make scripts first
                            _batch.make_scripts()
                            # Send job
                            _batch.send_job()

                        else:

                            if os.path.isfile(_f):

                                if utility.check_root_file(_f):
                                    utility.Print('status',
                                                  'File OK {0}'.format(_f))
                                else:
                                    utility.Print(
                                        'error',
                                        'Error with {0}. Removing ...'.format(
                                            _f))
                                    sp.check_output(['rm', _f])

                    _file_missing.close()
Example #38
File: FileTool.py  Project: ferencek/BTV
    def save_logical_file_names_all_samples_remotely(self):
        '''
        Try to find all files for the corresponding samples and save them in .txt files.
        NOTE: this function overwrites the files every time.
        '''

        utility.Print(
            'python_info',
            '\nCalled save_logical_file_names_all_samples function.')

        # Loop over samples
        for _s in self.samples:

            utility.Print('status', '\nSample: {0}'.format(_s))

            # Loop over all remote locations
            for _l in self.remote_locations['path']:

                utility.make_directory(self.path_logical_file_names, 'remote',
                                       _l, _s)

                # Browse for the sample files
                try:

                    # Get LFNs
                    _remote_path = os.path.join(
                        self.remote_locations['storage_element'][_l],
                        self.remote_locations['path'][_l], _s)
                    _remote_lfns = self._wrapper_gfal_ls_r(
                        _remote_path, self.remote_locations['protocol'][_l])

                    # Filter logical file names so that only interesting ones pass
                    _keywords_any = self.samples_info[_s]['subsample'].values(
                    )  # keep LFNs that contain the subsample string
                    _remote_lfns = [
                        _ll for _ll in _remote_lfns if utility.filter_keywords(
                            _ll, self.search_keywords['all'],
                            self.search_keywords['any'] +
                            _keywords_any, self.search_keywords['none'])
                    ]

                    # Group files according to subsample and save them in separate files
                    _remote_lfns = {
                        _ss: filter(lambda x: '/' + _ss + '/' in x,
                                    _remote_lfns)
                        for _ss in self.samples_info[_s]['subsample'].values()
                    }

                    # Save into files
                    for _ss, _r in _remote_lfns.iteritems():

                        _file = os.path.join(self.path_logical_file_names,
                                             'remote', _l, _s,
                                             _ss.replace('/', '__') + '.txt')

                        # Save to a file
                        with open(_file, 'w') as _output:
                            _output.write('\n'.join(_r))

                        utility.Print(
                            'status',
                            'The LFNs were written to "{}".'.format(_file))

                # If something goes wrong, notify
                except Exception as e:
                    utility.Print(
                        'error',
                        "Error with sample {0} at {1}: {2}".format(_s, _l, e))
Example #39
def get_picture_list(opener, eventno, categoryno, offset=0):
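    """Fetch the photo list JSON for an event/category (cached on disk),
    following pagination recursively, and return all pages."""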

    file_path = os.path.join(
        settings.PICTURE_TEMP_PATH, "json_data", eventno,
        "category_picture_list_{}_{}.json".format(categoryno, offset))

    json_data = None
    if os.path.isfile(file_path):
        with open(file_path, mode="r") as f:
            try:
                json_data = json.loads(f.read())
            except Exception:
                print("json read error ({})".format(file_path))

    if json_data is None:
        parse_result = urllib.parse.urlparse(settings.URL_ROOT)

        query_dict = {
            'action_Api_FastViewer_PhotoList': 'true',
            'eventno': eventno,
            'categoryno': categoryno,
            'offset': offset,
            'page': 'photolist',
            'sortkey': '0',
        }

        scheme, netloc, path, params, _, fragment = parse_result
        query = urllib.parse.urlencode(query_dict)

        url = urllib.parse.urlunparse(
            (scheme, netloc, path, params, query, fragment))

        print(url)

        req = urllib.request.Request(url)
        referer_url = '{}/?action_user_FastViewer=t&eventno={}'.format(
            settings.URL_ROOT, eventno)
        req.add_header('Referer', referer_url)
        req.add_header('accept',
                       'application/json, text/javascript, */*; q=0.01')
        req.add_header('accept-language', settings.HTTP_HEADER_LANGUAGE)
        req.add_header('User-Agent', settings.HTTP_HEADER_USER_AGENT)

        res = opener.open(req)

        utility.make_directory(file_path)
        with open(file_path, mode="wb") as f:
            f.write(res.read())

        with open(file_path, mode="r") as f:
            json_data = json.loads(f.read())

    json_data_list = []
    json_data_list.append(json_data)

    if json_data["message"]["pager"]["hasnext"]:
        next_json_data_list = get_picture_list(opener,
                                               eventno,
                                               categoryno,
                                               offset=offset + 100)
        json_data_list += next_json_data_list

    return json_data_list
Example #40
    # Our single command line argument is a parset containing all
    # configuration information we'll need.
    input_parset = lofar.parameterset.parameterset(sys.argv[1])

    # We require `sbs_per_beam` input MeasurementSets for each beam, including
    # the calibrator.
    sbs_per_beam = sum(input_parset.getIntVector("band_size"))

    print "Locating calibrator data and checking paths"
    ms_cal = {}
    ms_cal["datafiles"] = read_ms_list(input_parset.getString("cal_ms_list"))
    assert (len(ms_cal["datafiles"]) == sbs_per_beam)
    ms_cal["output_dir"] = os.path.join(input_parset.getString("output_dir"),
                                        "calibrator",
                                        input_parset.getString("cal_obsid"))
    make_directory(ms_cal["output_dir"])

    print "Copying calibrator subbands to output"
    ms_cal["datafiles"] = copy_to_work_area(ms_cal["datafiles"],
                                            ms_cal["output_dir"])

    print "Locating target data and checking paths"
    # ms_target will be a dict that provides all the information we need to
    # process each independent element of the observation, where an "element"
    # is a combination of a beam (SAP) and a band (number of subbands)
    ms_target = {}

    target_mss = read_ms_list(input_parset.getString("target_ms_list"))
    assert (len(target_mss) == input_parset.getInt("n_beams") * sbs_per_beam)

    for beam, data in enumerate(zip(*[iter(target_mss)] * sbs_per_beam)):