Example #1
import simplekml
import datetime as dt
import csv
import pprint
import re
import pandas
from pymongo import MongoClient, GEOSPHERE
from util import log
from util.config import Config
from bson.code import Code
from bson.son import SON

logger = log.getLogger(__name__)

# MongoDB Client & DB
cfg = Config()
client = MongoClient(cfg.get("mongo", "uri"))
db = client[cfg.get("mongo", "db_strava")]
segments_collection = db[cfg.get("mongo", "coll_segment")]
leaderboard_collection = db[cfg.get("mongo", "coll_leaderboards")]
zip_data_collection = db[cfg.get("mongo", "coll_zip")]
weather_collection = db[cfg.get("mongo", "coll_weather")]
wban_collection = db[cfg.get("mongo", "coll_wban")]

wban_date_format = cfg.get("weather", "date_format")
wban_time_format = cfg.get("weather", "time_format")
strava_datetime_format = cfg.get("strava", "date_time_format")


def merge_segments_and_weather():
    # iterate through segments in the segment collection
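The example above is cut off inside merge_segments_and_weather(). A minimal, hypothetical continuation of the loop using only the standard pymongo find() cursor (the actual merge logic is not shown in the original):

    for segment in segments_collection.find():
        logger.info("merging weather data for segment %s", segment.get("_id"))
        # a real implementation would look up matching documents in
        # weather_collection here and write the merged result back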
Example #2
def before_all(context):
    context.support = TestSupport()
    context.env = Config()
    context.stage = context.env.get_stage(context.config.userdata['STAGE'])
Example #3
def periodic_task_seconds():
    return Config().get_inteval_seconds('periodic.period',
                                        min_interval='1s',
                                        max_interval='1d')
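A minimal sketch of how such a bounded interval might be consumed; the polling loop below is illustrative only, and run_periodically/task are hypothetical names, not part of the original project:

import time

def run_periodically(task):
    # read the configured interval once, then sleep between invocations
    interval = periodic_task_seconds()
    while True:
        task()
        time.sleep(interval)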
Example #4
import numpy as np  # required for the np.max/np.min calls below
import tensorflow as tf  # required for tf.Session below
from util.metrics import mae
from util.metrics import rmse
from datasets import ml1m
from util.config import Config
from models.svd import SVD
from models.svdpp import SVDPP
from sklearn.model_selection import train_test_split

# Note that x is a 2D numpy array, 
# x[i, :] contains the user-item pair, and y[i] is the corresponding rating.
x, y = ml1m.load_data()

x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=0)
#print('x_train', x_train.shape)

config = Config()
config.num_users = np.max(x[:, 0]) + 1
config.num_items = np.max(x[:, 1]) + 1
config.min_value = np.min(y)
config.max_value = np.max(y)

with tf.Session() as sess:

    model = SVDPP(config, sess, dual=False)
    model.train(x_train, y_train, val_data=(x_test, y_test), epoches=1, batch_size=1024)
        
    y_pred = model.predict(x_test)
    print('rmse: {}, mae: {}'.format(rmse(y_test, y_pred), mae(y_test, y_pred)))

    # Save model
    model.save_model('model/')
Example #5
def parse_config(conf_file):
	conf = Config(conf_file)
	conf.load_conf()
Example #6
    def __init__(self):
        self.cf = Config()
        self.timeout = float(self.cf.get_http('time_out'))
        self.logger = Log.get_logger()
        self.env = self.url = ''
        # use separate dicts so that mutating one does not affect the others
        self.headers = {}
        self.params = {}
        self.data = {}
        self.files = {}
Example #7
"""
import re
import sys
import time
import Queue
import urllib
import threading
import HTMLParser
sys.path.append("..")
from util.log import Log
from util.config import Config

reload(sys)
sys.setdefaultencoding('utf-8')

cf = Config()
log = Log()
logger = log.get_logger()
visited_list = []


class LinkParser(HTMLParser.HTMLParser):
    """Costomized url parser class, subclass of HTMLParser.HTMLParser"""
    def __init__(self):
        """ self.links use to return """
        self.links = []
        HTMLParser.HTMLParser.__init__(self)

    def handle_starttag(self, tag, attrs):
        """ get all node start tag is <a>"""
        if tag == 'a':
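The handler is cut off at this point. A hypothetical continuation that collects href values, relying only on the standard HTMLParser API (attrs is a list of (name, value) tuples):

            for key, value in attrs:
                if key == 'href':
                    self.links.append(value)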
Example #8
    def __init__(self, architecture="ResNet50", learning_rate=0.001,
                 momentum=0.9, batch_size=32, epochs=50, fine_tuning_rate=100,
                 transfer_learning=False, save_weights=True, perc_train=80,
                 perc_validation=20, recreate_dataset=False):
        """
            Constructor of CNNKeras
        """

        self.architecture = Config(
            "Architecture", architecture, str)
        self.learning_rate = Config(
            "Learning rate", learning_rate, float)
        self.momentum = Config(
            "Momentum", momentum, float)
        self.batch_size = Config(
            "Batch size", batch_size, int)
        self.epochs = Config(
            "Epochs", epochs, int)
        self.fine_tuning_rate = Config(
            "Fine Tuning Rate", fine_tuning_rate, int)
        self.transfer_learning = Config(
            "Transfer Learning", transfer_learning, bool)
        self.save_weights = Config(
            "Save weights", save_weights, bool)
        self.perc_train = Config(
            "Perc Train", perc_train, float)
        self.perc_validation = Config(
            "Perc Validation", perc_validation, float)
        self.recreate_dataset = Config(
            "Recreate Dataset", recreate_dataset, bool)
        self.file_name = "kerasCNN"

        self.model = None

        self.trained = False
Example #9
File: utils.py Project: yuanbw/nauta
def upload_experiment_to_git_repo_manager(username: str, experiment_name: str,
                                          experiments_workdir: str,
                                          run_name: str):
    git_repo_dir = f'.nauta-git-{username}-{compute_hash_of_k8s_env_address()}'
    git_work_dir = os.path.join(experiments_workdir, run_name)

    try:
        create_gitignore_file_for_experiments(git_work_dir)
        fake_ssh_path = get_fake_ssh_path(username=username,
                                          config_dir=Config().config_path)
        git_env = {
            'GIT_SSH': fake_ssh_path,
            'GIT_DIR': os.path.join(experiments_workdir, git_repo_dir),
            'GIT_WORK_TREE': git_work_dir,
            'GIT_TERMINAL_PROMPT': '0',
            'SSH_AUTH_SOCK':
            '',  # Unset SSH_AUTH_SOCK to prevent issues when multiple users are using same nctl
        }
        env = {
            **os.environ,
            **git_env
        }  # Add git_env defined above to currently set environment variables
        if 'LD_LIBRARY_PATH' in env:
            # do not copy LD_LIBRARY_PATH to git exec env - it points to libraries packed by PyInstaller
            # and they can be incompatible with system's git (e.g. libssl)
            del env['LD_LIBRARY_PATH']
        git = ExternalCliClient(executable='git',
                                env=env,
                                cwd=experiments_workdir,
                                timeout=60)
        # ls-remote command must be created manually due to hyphen
        git.ls_remote = git._make_command(name='ls-remote')  #type: ignore
        with TcpK8sProxy(NAUTAAppNames.GIT_REPO_MANAGER_SSH) as proxy:
            if not os.path.isdir(f'{experiments_workdir}/{git_repo_dir}'):
                git.clone(
                    f'ssh://git@localhost:{proxy.tunnel_port}/{username}/experiments.git',
                    git_repo_dir,
                    bare=True)
            git.remote(
                'set-url', 'origin',
                f'ssh://git@localhost:{proxy.tunnel_port}/{username}/experiments.git'
            )
            _initialize_git_client_config(git, username=username)
            git.add('.', '--all')
            git.commit(message=f'experiment: {experiment_name}',
                       allow_empty=True)
            remote_branches, _, _ = git.ls_remote()
            local_branches, _, _ = git.branch()
            if 'master' in local_branches:
                git.checkout('master')
            else:
                git.checkout('-b', 'master')
            if 'master' in remote_branches:
                try:
                    git.pull('--rebase', '--strategy=recursive', '-Xtheirs')
                except Exception:
                    git.rebase('--abort')
                    raise
            git.push('--set-upstream', 'origin', 'master')
            git.tag(experiment_name)
            git.push('--tags')
    except Exception:
        logger.exception(
            f'Failed to upload experiment {experiment_name} to git repo manager.'
        )
        try:
            git_env = {
                'GIT_DIR': os.path.join(experiments_workdir, git_repo_dir),
                'GIT_WORK_TREE': git_work_dir,
                'GIT_TERMINAL_PROMPT': '0',
                'SSH_AUTH_SOCK': '',
            }
            env = {
                **os.environ,
                **git_env
            }  # Add git_env defined above to currently set environment variables
            git = ExternalCliClient(executable='git',
                                    env=env,
                                    cwd=experiments_workdir,
                                    timeout=60)
            git.reset('--hard', 'master', _cwd=experiments_workdir)
        except Exception:
            logger.exception(
                f'Failed to rollback {experiment_name} experiment upload to git repo manager.'
            )
        raise
Example #10
    def __init__(self):
        self.configObj = Config()
        self.database = Database()
Example #11
    def __init__(self):
        self._redis = None
        self._redis_url = Config().get('db.redis.url')
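A minimal, self-contained sketch of how the client might then be created lazily, assuming the redis-py package and the same util.config.Config import as the surrounding example (the class and method names here are hypothetical):

import redis
from util.config import Config

class RedisHolder:
    def __init__(self):
        self._redis = None
        self._redis_url = Config().get('db.redis.url')

    def get_redis(self):
        # connect on first use and cache the client on the instance
        if self._redis is None:
            self._redis = redis.from_url(self._redis_url)
        return self._redis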
Example #12
            pose = np.loadtxt(filename_pose)
            pose = util.norm_pose(pose)
            pose = np.reshape(pose, (-1, 10, 2), 'F')  # n_frame * 10 * 2

            util.write_video(pose, filename_vid, filename_aud)


#%%

if __name__ == '__main__':

    from util.config import Config
    from util.logger import Logger

    config = Config("../model/config_001.json")
    config_idx = config.get("config_idx")
    path_set = "../data/set_" + config_idx

    path_pose_single = config.get("path_pose_single")
    log_filename = os.path.join(path_pose_single, "log.txt")
    logger = Logger(log_filename)

    d = DataPrepVid(config, logger)
    #    d.split_player()
    d.get_segment(["vn"], "train")
    d.get_segment(["vn"], "valid")
    d.get_segment(["vn"], "test")
    # d.get_pose_visualization(["vn"], "train")
    #%%
#    pose = d.pose
Example #13
    def __init__(self, story=None):
        self.config = Config()

        if story:
            self.story = story
            self.team = self.config.get_team_for_story(self.story)
Example #14
    def __init__(self,
                 architecture="VGG16",
                 learning_rate=0.001,
                 momentum=0.9,
                 batch_size=32,
                 epochs=50,
                 fine_tuning_rate=100,
                 transfer_learning=False,
                 save_weights=True,
                 recreate_dataset=False,
                 train_data_directory="",
                 validation_data_directory="",
                 test_data_directory="",
                 no_label_data_directory=""):
        """
            Constructor of CNNKeras
        """

        self.architecture = Config("Architecture", architecture, str)
        self.learning_rate = Config("Learning rate", learning_rate, float)
        self.momentum = Config("Momentum", momentum, float)
        self.batch_size = Config("Batch size", batch_size, int)
        self.epochs = Config("Epochs", epochs, int)
        self.fine_tuning_rate = Config("Fine Tuning Rate", fine_tuning_rate,
                                       int)
        self.transfer_learning = Config("Transfer Learning", transfer_learning,
                                        bool)
        self.save_weights = Config("Save weights", save_weights, bool)
        self.recreate_dataset = Config("Recreate Dataset", recreate_dataset,
                                       bool)
        self.train_data_directory = Config("Train data directory",
                                           train_data_directory, str)
        self.validation_data_directory = Config("Validation data directory",
                                                validation_data_directory, str)
        self.test_data_directory = Config("Test data directory",
                                          test_data_directory, str)
        self.no_label_data_directory = Config("No label data directory",
                                              no_label_data_directory, str)

        self.model = None
        self.pseudo_label = None
        self.trained = False
Example #15
# A snippet showing how to run algorithms directly in QRec
from QRec import QRec  # needs to be modified according to your path
from util.config import Config  # needs to be modified according to your path
#-----------------------------------------------------------------------------------
# create your own config file by following the file format in the config directory
#-----------------------------------------------------------------------------------
config = Config("/home/xxx/algor_name.conf")
rec = QRec(config)
rec.execute()
#-----------------------------------------------------------------------------------

# your own code to use the recommendation results

#-----------------------------------------------------------------------------------
Example #16
class deleteUserById(unittest.TestCase):
    # error codes
    __result_code = config.read_data("api_result_code")
    # name of the current module
    __module = os.path.split(os.path.dirname(os.path.abspath(__file__)))[1]
    # name of the current API method
    __method = sys._getframe().f_code.co_name
    # URL
    __url = config.get("server", "host") + __module + '/' + __method
    # configuration file of the current module
    __config = Config(config=os.path.join(DATA_PATH, __module + '.ini'))
    # parameters of the current API method
    __params = __config.read_data(__method)

    @classmethod
    def setUpClass(cls):
        params = {"name": "test", "password": "******"}
        url = config.get("server", "host") + cls.__module + '/' + 'insertUser'
        result = HTTPsClient(url).send(params)
        deleteUserById.user_id = result["data"]["id"]

    def send(self, params):
        result = HTTPsClient(self.__url).send(params)
        # printing the dict directly can garble the Chinese characters,
        # so use the dumps method from the json library instead
        logger.debug(json.dumps(params, ensure_ascii=False))
        logger.debug(json.dumps(result, ensure_ascii=False))
        return result

    def test_success(self):
        '''Successful deletion'''
        params = self.__params.copy()
        params['id'] = deleteUserById.user_id
        response = self.send(params)
        code_result = response['code']
        self.assertEqual(str(code_result),
                         self.__result_code["STATUS_SUCCESS_CODE"])

    def test_id_not_exist(self):
        '''id does not exist'''
        params = self.__params.copy()
        params['id'] = 12345678910
        response = self.send(params)
        code_result = response['code']
        self.assertEqual(str(code_result),
                         self.__result_code["USER_DELETE_FAIL_CODE"])

    def test_id_type_is_string(self):
        '''id is of string type'''
        params = self.__params.copy()
        params['id'] = "test"
        response = self.send(params)
        code_result = response['code']
        self.assertEqual(str(code_result),
                         self.__result_code["USER_DELETE_FAIL_CODE"])

    def test_id_is_None(self):
        '''id is None'''
        params = self.__params.copy()
        params['id'] = None
        response = self.send(params)
        code_result = response['code']
        self.assertEqual(str(code_result),
                         self.__result_code["USER_DELETE_FAIL_CODE"])

    def test_id_is_empty(self):
        '''id is empty'''
        params = self.__params.copy()
        params['id'] = ""
        response = self.send(params)
        code_result = response['code']
        self.assertEqual(str(code_result),
                         self.__result_code["USER_DELETE_FAIL_CODE"])
Example #17
File: config.py Project: hzjai0624/nauta
def config(state: State, cpu: str, memory: str):

    if not cpu or not memory:
        handle_error(logger, Texts.MISSING_ARGUMENTS, Texts.MISSING_ARGUMENTS)
        sys.exit(1)

    if not validate_cpu_settings(cpu):
        handle_error(logger, Texts.CPU_WRONG_FORMAT, Texts.CPU_WRONG_FORMAT)
        sys.exit(1)

    if not validate_memory_settings(memory):
        handle_error(logger, Texts.MEMORY_WRONG_FORMAT,
                     Texts.MEMORY_WRONG_FORMAT)
        sys.exit(1)

    configuration = NAUTAConfigMap()

    if configuration.minimal_node_memory_amount and \
       convert_k8s_memory_resource(configuration.minimal_node_memory_amount) > convert_k8s_memory_resource(memory):
        error_message = Texts.MEMORY_SETTINGS_TOO_LOW.format(
            memory_value=configuration.minimal_node_memory_amount)
        handle_error(logger, error_message, error_message)
        sys.exit(1)

    if configuration.minimal_node_cpu_number and \
       convert_k8s_cpu_resource(configuration.minimal_node_cpu_number) > convert_k8s_cpu_resource(cpu):
        error_message = Texts.CPU_SETTINGS_TOO_LOW.format(
            cpu_value=configuration.minimal_node_cpu_number)
        handle_error(logger, error_message, error_message)
        sys.exit(1)

    config_file_location = os.path.join(Config().config_path,
                                        NODE_CONFIG_FILENAME)

    if not os.path.isfile(config_file_location):
        handle_error(logger, Texts.MISSING_CONFIG_FILE,
                     Texts.MISSING_CONFIG_FILE)
        sys.exit(1)

    with open(config_file_location, 'r+', encoding='utf-8') as config_file, \
            spinner(text=Texts.CONFIG_UPDATE):
        config_file_content = yaml.load(config_file)
        cpu_number = str(config_file_content.get(CPU_NUMBER_FIELDNAME))
        memory_amount = str(config_file_content.get(MEMORY_AMOUNT_FIELDNAME))
        cpu_system_required_min = str(
            config_file_content.get(CPU_SYSTEM_REQUIRED_MIN_FIELDNAME))
        cpu_system_required_percent = str(
            config_file_content.get(CPU_SYSTEM_REQUIRED_PERCENT_FIELDNAME))
        memory_system_required_min = str(
            config_file_content.get(MEMORY_SYSTEM_REQUIRED_MIN_FIELDNAME))
        memory_system_required_percent = str(
            config_file_content.get(MEMORY_SYSTEM_REQUIRED_PERCENT_FIELDNAME))

        if not cpu_number or cpu_number == "None" or not memory_amount or memory_amount == "None":
            handle_error(logger, Texts.CONFIG_FILE_INCORRECT,
                         Texts.CONFIG_FILE_INCORRECT)
            sys.exit(1)

        try:
            override_values_in_packs(
                new_cpu_number=cpu,
                new_memory_amount=memory,
                current_cpu_number=cpu_number,
                current_mem_amount=memory_amount,
                cpu_system_required_min=cpu_system_required_min,
                cpu_system_required_percent=cpu_system_required_percent,
                mem_system_required_min=memory_system_required_min,
                mem_system_required_percent=memory_system_required_percent)
        except Exception:
            logger.exception(Texts.ERROR_DURING_UPDATE)
            handle_error(logger, Texts.ERROR_DURING_UPDATE,
                         Texts.ERROR_DURING_UPDATE)
            sys.exit(1)

        config_file.seek(0)
        config_file.truncate()
        config_file_content[CPU_NUMBER_FIELDNAME] = cpu
        config_file_content[MEMORY_AMOUNT_FIELDNAME] = memory
        yaml.dump(config_file_content,
                  config_file,
                  default_flow_style=False,
                  explicit_start=True)

    click.echo(Texts.SUCCESS_MESSAGE)
Example #18
import sys
import re

from util.logger import Logger
from util.adb import Adb
from util.config import Config
from util.utils import Utils
from time import sleep

config = Config("config.ini")
Adb.service = config.network["service"]
Adb.device = "-d" if (Adb.service == "PHONE") else "-e"
adb = Adb()
if adb.init():

    Logger.log_msg('Successfully connected to the service.')
    output = Adb.exec_out('wm size').decode('utf-8').strip()
    Logger.log_info(output)
    if not re.search('1080x2340|2340x1080', output):
        Logger.log_error("Resolution is not 2340x1080, please change it.")
        sys.exit()

    Utils.save_screen()

else:
    Logger.log_error('Unable to connect to the service.')
    sys.exit()
Example #19
parser.add_argument('-c',
                    '--config',
                    metavar=('CONFIG_FILE'),
                    help='Use the specified configuration file instead ' +
                    'of the default config.ini')
parser.add_argument('-d',
                    '--debug',
                    help='Enables debugging logs.',
                    action='store_true')
parser.add_argument('-l',
                    '--legacy',
                    help='Enables sed usage.',
                    action='store_true')
args = parser.parse_args()

config = Config('config.ini')

# check args, and if none provided, load default config
if args:
    if args.config:
        config = Config(args.config)
    if args.debug:
        Logger.log_info("Enabled debugging.")
        Logger.enable_debugging(Logger)
    if args.legacy:
        Logger.log_info("Enabled sed usage.")
        Adb.enable_legacy(Adb)

script = ALAuto(config)

Adb.service = config.network['service']
Example #20
    parser.add_argument("--config",
                        action="store",
                        type=str,
                        required=True,
                        help="config file path")
    parser.add_argument("--mode",
                        action="store",
                        type=str,
                        choices=["train", "resume", "eval"],
                        required=True)
    parser.add_argument("--cpu", action="store_true")
    parser.add_argument("--log",
                        action="store",
                        type=str,
                        required=False,
                        default=None,
                        help="log file path")

    # parse args
    args = parser.parse_args()

    # init config
    config = Config(config_path=args.config)
    config.load_config()

    logging_config(config["checkpoint.save_dir"], args.log)
    config.copy_config()

    # start training
    run(config, args.mode, args.cpu)
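With an argument setup like this, the script would typically be invoked along the lines of "python train.py --config configs/example.yaml --mode train" (the file names here are hypothetical).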
Example #21
import torch  # required for torch.device below
from models.loss import SNDisLoss, SNGenLoss, ReconLoss
from util.logger import TensorBoardLogger
from util.config import Config
from data.inpaint_dataset import InpaintDataset
from util.evaluation import AverageMeter
from evaluation import metrics
from PIL import Image
import pickle as pkl
import numpy as np
import logging
import time
import sys
import os

# python train inpaint.yml
config = Config(sys.argv[1])
logger = logging.getLogger(__name__)
time_stamp = time.strftime('%Y%m%d%H%M', time.localtime(time.time()))
log_dir = 'model_logs/{}_{}'.format(time_stamp, config.LOG_DIR)
result_dir = 'result_logs/{}_{}'.format(time_stamp, config.LOG_DIR)
tensorboardlogger = TensorBoardLogger(log_dir)
cuda0 = torch.device('cuda:{}'.format(config.GPU_ID))
cpu0 = torch.device('cpu')


def logger_init():
    """
    Initialize the logger to some file.
    """
    logging.basicConfig(level=logging.INFO)
Example #22
    def __init__(self, bot):
        self.bot = bot
        self.conf = Config()
Example #23
def get_values_file_location(pack_name: str = None):
    dlsctl_config_dir_path = Config().get_config_path()
    pack = "*" if not pack_name else pack_name
    return f"{dlsctl_config_dir_path}/packs/{pack}/charts/values.yaml"
Example #24
#!/usr/bin/env python
from util.sender import Sender
from util.logger import logger
from util.config import Config
from util.receiver import Receiver
import json
import os
from threading import Thread
from time import sleep
import sys
from importlib import import_module
config = Config('./util/config.ini')

try:
    if len(sys.argv) == 1:
        run_count = 1
    else:
        run_count = int(sys.argv[1])
except Exception as e:
    print u'sys args wrong!', repr(e)
    sys.exit(0)


def send_result(name, id, msg):
    tr = {'name': name, '_id': id, 'msg': msg}
    trstr = json.dumps(tr)
    while True:
        try:
            send = Sender(config.rmq_host, config.rmq_user,
                          config.rmq_password, config.ipv4_result_channel)
            send.send_msg(trstr)
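The retry loop is cut off here. A hypothetical continuation of the try block, assuming the original breaks out once the message is sent and retries on failure (logger and sleep are already imported above):

            break
        except Exception as e:
            logger.error('send result failed, retrying: %s' % repr(e))
            sleep(5)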