def __write_header(self, args):
    """Render the report header template and write it to the output file.

    ``args.tool_path`` supplies the appleseed binary path shown in the header;
    the remaining values come from module-level constants and ``utils``.
    """
    script_path = os.path.realpath(__file__)
    # Substitution context for the header template.
    context = {
        'test-date': CURRENT_TIME,
        'python-version': utils.get_python_version(),
        'script-path': script_path,
        'script-version': VERSION,
        'appleseed-binary-path': args.tool_path,
        'max-abs-diff-allowed': VALUE_THRESHOLD,
        'max-diff-comps-count-allowed': MAX_DIFFERING_COMPONENTS,
    }
    self.file.write(self.__render(self.header_template, context))
    # Flush immediately so the header is on disk even if a later step crashes.
    self.file.flush()
def dump2file(self, output_file):
    """Write every group of duplicate files to *output_file*.

    Each group is preceded by a separator line; each member is listed with
    its human-readable size and path.
    """
    LOG.debug("%s dump2file", self.__class__.__name__)
    # Python 3 needs an explicit utf-8 codec layer; Python 2 writes as-is.
    if utils.get_python_version() == 3:
        out = codecs.open(output_file, "w", "utf-8")
    else:
        out = open(output_file, 'w')
    # The file object is its own context manager, closing it on any exit path.
    with out:
        for group in self.sorted_dup_files:
            out.write("================\n")
            for entry in group:
                rendered_size = utils.size_renderer(entry.size)
                out.write("Size: {0}, File: {1}\n".format(rendered_size, entry.path))
def main():
    """Parse command-line options and run the ConE installation build.

    Returns:
        0 on success, 1 on any failure (missing Python/setuptools tools
        or a failed build).
    """
    parser = optparse.OptionParser()
    parser.add_option("-t", "--target-dir",
                      help="The directory where ConE is to be installed.")
    parser.add_option("-p", "--plugin-package",
                      help="The plug-in package to include in the installation.",
                      default=None)
    # Fix: help text previously omitted the accepted 'build' value that the
    # validation below explicitly allows.
    parser.add_option("-i", "--install-type",
                      help="The installation type, can be 'install' (the default), 'build' or 'develop'.",
                      default='install')
    # Fix: help text previously read "The Python executable to run type".
    parser.add_option("--python-executable",
                      help="The Python executable to use, defaults to 'python'.",
                      default='python')
    (options, args) = parser.parse_args()

    if options.target_dir is None:
        parser.error("Target directory must be given")
    if options.install_type not in ('install', 'build', 'develop'):
        parser.error("Invalid install type ('%s')" % options.install_type)

    # Sanity-check that the chosen Python interpreter can be executed at all.
    if not utils.run_command("%s --help" % options.python_executable):
        log.critical("Could not run '%s'. Please make sure that you "
                     "have Python installed and in your path." % options.python_executable)
        return 1

    # setuptools installs a version-suffixed script (e.g. easy_install-2.6);
    # verify it is runnable before attempting the build.
    python_version = utils.get_python_version(options.python_executable)
    easy_install_cmd = "easy_install-%s" % python_version
    if not utils.run_command("%s --help" % easy_install_cmd):
        log.critical("Could not run '%s'. Please make sure that you "
                     "have setuptools installed and the Python scripts directory in your path."
                     % easy_install_cmd)
        return 1

    try:
        perform_build(options.target_dir,
                      options.plugin_package,
                      options.install_type,
                      python_version,
                      options.python_executable)
    except BuildFailedError:
        return 1
    return 0
def __write_header(self, args):
    """Render the report header (including git commit info) and write it out.

    ``args.tool_path`` supplies the appleseed binary path; git details are
    taken from the repository containing the template directory.
    """
    commit_hash, commit_title, commit_html = git_information(self.template_directory)
    header_values = {
        'test-date': CURRENT_TIME,
        'python-version': utils.get_python_version(),
        'script-path': os.path.realpath(__file__),
        'script-version': VERSION,
        'appleseed-binary-path': args.tool_path,
        'max-abs-diff-allowed': VALUE_THRESHOLD,
        'max-diff-comps-count-allowed': MAX_DIFFERING_COMPONENTS,
        'git-commit-hash': commit_hash,
        'git-commit-title': commit_title,
        'git-commit-html-fragment': commit_html,
    }
    self.file.write(self.__render(self.header_template, header_values))
    # Flush so the header survives a crash later in the run.
    self.file.flush()
def add_python_version(data: dict) -> dict:
    """Record the running Python version in *data* (in place) and return it."""
    data.update(python_version=utils.get_python_version())
    return data
from __future__ import (absolute_import, division, print_function, unicode_literals) import requests import json import utils import clickhouse import datetime import logging if utils.get_python_version().startswith('2'): from urllib import urlencode else: from urllib.parse import urlencode logger = logging.getLogger('logs_api') HOST = 'https://api-metrika.yandex.ru' def get_estimation(user_request): '''Returns estimation of Logs API (whether it's possible to load data and max period in days)''' url_params = urlencode( [ ('date1', user_request.start_date_str), ('date2', user_request.end_date_str), ('source', user_request.source), ('fields', ','.join(user_request.fields)), ('oauth_token', user_request.token)
Created on 2016-7-5 @author: Danny DannyWork Project """ import socket import threading import time import logging import argparse from utils import close_socket, parse_ping_data, reply_ping_data, get_python_version, start_data_transfer, \ PING_SENDING_START, TRANSFER_PREPARE, TRANSFER_READY if get_python_version() == '2': from exceptions import * class ConnectionHold(threading.Thread): """ 连接保持与传输检测 """ socket = None secret = '' target_ip = None target_port = None socket_timeout = 120
import os import datetime # distances = [0.2, 0.1] # percents = [90, 95, 99] # activations = ['leaky_relu'] from utils import get_python_version python_version = get_python_version() global_distance = 0.1 labels = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] model_names = [ 'mnist_test_model_5_30', 'mnist_test_model_8_20', 'mnist_test_model_6_25' ] arch = [(5, 30), (8, 20), (6, 25)] techniques = ['tarantula', 'ochiai', 'dstar', 'random'] repeat = [1, 2, 3, 4, 5] suspics_num = [1, 2, 3, 5, 10] step_size = [0.01, 0.1, 1, 10] for step in step_size: for label in labels: for model_name in model_names: for tech in techniques: for sn in suspics_num: for rep in repeat: if (not tech == 'random') and rep > 1: continue if python_version == 2:
logger.info('### SAVING DATA') for part in range(api_request.size): logger.info('Part #' + str(part)) logs_api.save_data(api_request, part) logger.info('### CLEANING DATA') logs_api.clean_data(api_request) except Exception as e: logger.critical('Iteration #{i} failed'.format(i=i + 1)) if i == config['retries'] - 1: raise e if __name__ == '__main__': print('##### python', utils.get_python_version()) start_time = time.time() config = utils.get_config() setup_logging(config) user_request = build_user_request(config) # If data for specified period is already in database, script is skipped if clickhouse.is_data_present(user_request.start_date_str, user_request.end_date_str, user_request.source): logging.critical('Data for selected dates is already in database') exit(0) integrate_with_logs_api(config, user_request)
def main(argv):
    """Export script tests, plug-in integration tests and dependency eggs
    into a standalone test directory.

    Returns 0 on success, 1 if a required dependency egg cannot be found.
    Note: *argv* is accepted but not read — options come from sys.argv via
    OptionParser.
    """
    # -----------
    # Parse args
    # -----------
    parser = OptionParser()
    parser.add_option("-t", "--target-dir", help="The directory where the test are to be exported.")
    parser.add_option(
        "-p", "--plugin-package",
        help="The plug-in package for exporting plug-in integration tests.",
        default=None)
    (options, args) = parser.parse_args()
    if options.target_dir is None:
        parser.error("Target directory must be given")

    PYTHON_VERSION = utils.get_python_version()
    TARGET_PATH = options.target_dir
    PLUGIN_PACKAGE = options.plugin_package
    log.info("Target directory: %s" % TARGET_PATH)
    log.info("Plug-in package: %r" % PLUGIN_PACKAGE)
    log.info("Python version: %s" % PYTHON_VERSION)

    # Wipe and recreate the target so the export starts from a clean tree.
    log.info("Cleaning target directory...")
    utils.recreate_dir(TARGET_PATH)

    # -------------------------
    # Export script test files
    # -------------------------
    log.info("Copying script test files...")
    SCRIPT_TESTS_DIR = os.path.join(SCRIPTS_SOURCE_ROOT, 'tests')
    assert os.path.exists(SCRIPT_TESTS_DIR)
    # Skip VCS dirs, temp output and the export helper; skip logs and bytecode.
    copy_dir(source_dir=SCRIPT_TESTS_DIR,
             target_dir=os.path.join(TARGET_PATH, 'tests'),
             dir_ignore_functions=[
                 lambda d: d in ('.svn', 'temp', 'export_standalone')
             ],
             file_ignore_functions=[
                 lambda f: f == 'cone.log' or f.endswith('.pyc')
             ])
    log.info("Copying script test overlay files...")
    copy_dir(source_dir=os.path.join(ROOT_PATH, "export-bat/scripts-tests-overlay"),
             target_dir=TARGET_PATH,
             dir_ignore_functions=[lambda d: d == '.svn'])

    # --------------------------------------
    # Export plug-in integration test files
    # --------------------------------------
    log.info("Exporting plug-in integration test files...")
    subpaths_by_package = plugin_utils.find_plugin_package_subpaths(
        PLUGIN_SOURCE_ROOT, 'integration-test', PLUGIN_PACKAGE)
    for package_name, tests_path in subpaths_by_package:
        log.debug(" Package: %s" % package_name)
        log.debug(" Path: %s" % tests_path)
        log.debug(" Copying test files...")
        target_path = os.path.join(TARGET_PATH, 'plugin-tests', package_name + '_tests')
        copy_dir(source_dir=tests_path,
                 target_dir=target_path,
                 dir_ignore_functions=[lambda d: d in ('.svn', 'temp')],
                 file_ignore_functions=[
                     lambda f: f in ('cone.log', 'export_standalone.py') or f.endswith('.pyc')
                 ])
        log.debug(" Copying overlay files...")
        # NOTE(review): single-argument os.path.join — this resolves relative
        # to the current working directory, unlike the scripts-tests overlay
        # above which is anchored at ROOT_PATH. Confirm whether ROOT_PATH
        # should be joined here too.
        overlay_path = os.path.join(
            'export-bat/plugin-integration-test-overlay')
        copy_dir(source_dir=overlay_path,
                 target_dir=target_path,
                 dir_ignore_functions=[lambda d: d == '.svn'])
        # A package may ship an export_standalone.py exposing an export hook;
        # if present, run it against the package's target directory.
        log.debug(" Exporting extra data...")
        func = read_export_function_from_file(
            os.path.join(tests_path, 'export_standalone.py'))
        if func:
            log.debug(" Executing export function...")
            func(target_path)

    TARGET_EGGS_DIR = os.path.join(TARGET_PATH, 'eggs')

    # ---------------------------
    # Copy needed dependency eggs
    # ---------------------------
    log.info("Copying library eggs...")
    DEP_EGGS_DIR = os.path.normpath(os.path.join(ROOT_PATH, '../dep-eggs'))
    assert os.path.isdir(DEP_EGGS_DIR)
    DEPENDENCIES = ['simplejson']
    for dep in DEPENDENCIES:
        # Eggs are Python-version specific, so match against PYTHON_VERSION.
        egg_file_name = find_egg_file(DEP_EGGS_DIR, dep, PYTHON_VERSION)
        if egg_file_name is None:
            log.critical(
                "Could not find egg file for dependency '%s' from '%s'"
                % (dep, DEP_EGGS_DIR))
            return 1
        source_path = os.path.join(DEP_EGGS_DIR, egg_file_name)
        target_path = os.path.join(TARGET_EGGS_DIR, egg_file_name)
        utils.copy_file(source_path, target_path)

    # ------------------
    # Build needed eggs
    # ------------------
    log.info("Building eggs...")
    utils.build_egg(os.path.join(SOURCE_ROOT), TARGET_EGGS_DIR)
    utils.build_egg(os.path.join(SOURCE_ROOT, 'testautomation'), TARGET_EGGS_DIR)
    return 0