def __get_json(self, url):
        token = os.getenv('GITLAB_TOKEN')
        if not token:
            c = Config()
            token = c.data('gitlab', 'token')
        headers = {'PRIVATE-TOKEN': token}
        r = requests.get(url, headers=headers)
        r_json = r.json()

        # Follow the paginated API: keep fetching while a rel="next" link
        # is present in the Link response header.
        next_link_re = re.compile('.*<(.+)>; rel="next".*')
        while True:
            link_header = r.headers.get('Link')
            if not link_header:
                break

            matches = next_link_re.findall(link_header)
            if not matches:
                break

            r = requests.get(matches[0], headers=headers)
            r_json += r.json()

        return r_json
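
Note: the regular expression above pulls the rel="next" URL out of the Link response header that GitLab uses for pagination. A self-contained sketch of what it matches (the URL is hypothetical):

import re

link_header = ('<https://gitlab.example.com/api/v4/projects?page=2>; rel="next", '
               '<https://gitlab.example.com/api/v4/projects?page=1>; rel="first"')
next_link_re = re.compile('.*<(.+)>; rel="next".*')
print(next_link_re.findall(link_header)[0])
# -> https://gitlab.example.com/api/v4/projects?page=2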
Example #2
def launch_instance(connect=True):
    """Launch an instance and wait for it to start running.
    Returns a tuple consisting of the Instance object and the CmdShell object,
    if requested, or None.

    connect -- whether to perform an SSH connection test against the newly
    created instance (takes up to 1 min)
    """

    config = Config()

    # Create a connection to EC2 service (assuming credentials are in boto config)
    ec2 = boto.ec2.connect_to_region(config.get("region"))

    # Check to see if specified key pair already exists.
    # If we get an InvalidKeyPair.NotFound error back from EC2,
    # it means that it doesn't exist and we need to create it.
    key_name = config.get("key_name")
    logger = logging.getLogger(__name__)
    try:
        key = ec2.get_all_key_pairs(keynames=[key_name])[0]
    except ec2.ResponseError as e:
        if e.code == "InvalidKeyPair.NotFound":
            output = "Creating key pair: %s" % key_name
            print(output)
            logger.info(output)
            # Create an SSH key to use when logging into instances.
            key = ec2.create_key_pair(key_name)

            # AWS will store the public key but the private key is
            # generated and returned and needs to be stored locally.
            # The save method will also chmod the file to protect
            # your private key.
            key.save(static.KEY_DIR)
        else:
            raise
Example #3
def apply_time_difference(time):
    """Takes time difference from config file and adjusts the supplied time in accordance with it"""
    config = Config()
    time_diff = int(config.get("time_difference"))
    delta = datetime.timedelta(hours=abs(time_diff))
    if time_diff < 0:
        return time - delta
    else:
        return time + delta
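
Note: the abs()-and-branch above can be collapsed, since datetime.timedelta accepts negative hour values directly. A minimal equivalent sketch:

import datetime

def apply_time_difference_simplified(time, time_diff_hours):
    # timedelta handles the sign itself, so no branching is needed
    return time + datetime.timedelta(hours=time_diff_hours)

now = datetime.datetime(2013, 8, 30, 12, 0)
assert apply_time_difference_simplified(now, -3) == now - datetime.timedelta(hours=3)
assert apply_time_difference_simplified(now, 5) == now + datetime.timedelta(hours=5)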
Example #4
 def reinit(self):
     c = Config('facebook')
     
     #if not self.facebook.uid:
     #    self.redirect( self.facebook.get_app_url() )
     
     self.args = dict(
         app_key=c.id('api_key'),
         app_url='http://apps.facebook.com/' + c.id('app_name') + '/',
     )
Example #5
def get_console_log(instance_id):
    """Print console log of a given EC2 instance"""
    config = Config()
    ec2 = boto.ec2.connect_to_region(config.get("region"))
    reservations = ec2.get_all_instances(filters={"instance-id": instance_id})
    if reservations:
        instance = reservations[0].instances[0]
        return instance.get_console_output().output
    else:
        return "Instance with id %s not found" % instance_id
Example #6
def prepare_archives():
    config = Config()
    dir_to_save = config.get_home_dir() + "aws/"
    dir_to_archive = config.get_home_dir() + static.JOB_ROOT_DIR
    distutils.archive_util.make_archive(
        dir_to_save + static.JOB_BASE_NAME, static.ARCHIVE_FORMAT, root_dir=dir_to_archive
    )
    dir_to_archive = config.get_home_dir() + static.RC_ROOT_DIR
    distutils.archive_util.make_archive(
        dir_to_save + static.RC_BASE_NAME, static.ARCHIVE_FORMAT, root_dir=dir_to_archive
    )
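
Note: distutils (and with it distutils.archive_util) was removed in Python 3.12; shutil.make_archive takes the same base_name/format/root_dir arguments. A sketch with hypothetical paths:

import shutil

# creates /home/abc/aws/job.tar.gz from the contents of /home/abc/job
shutil.make_archive('/home/abc/aws/job', 'gztar', root_dir='/home/abc/job')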
Example #7
def terminate_instances(instances_to_terminate):
    """Terminate the EC2 instances given their IDs"""
    config = Config()
    logger = logging.getLogger(__name__)

    # Create a connection to EC2 service (assuming credentials are in boto config)
    ec2 = boto.ec2.connect_to_region(config.get("region"))
    reservations = ec2.get_all_instances(filters={"instance-state-name": "running"})

    for reservation in reservations:
        for instance in reservation.instances:
            if instance.id in instances_to_terminate:
                instance.terminate()
                logger.info("AWS EC2 instance %s terminated" % instance.id)
Example #8
File: cw.py  Project: dzzh/IN4392
def get_monitoring_information_from_start(config_id, instanceID, metric_q, statistic, unit):

    config = Config(config_id)
    cw = boto.ec2.cloudwatch.connect_to_region(config.get('region'))
    metrics = cw.list_metrics(dimensions={'InstanceId': instanceID},
                              metric_name=metric_q)
    if metrics:
        inst = aws_ec2.get_instance(config, instanceID)
        end_time = datetime.datetime.utcnow()
        start_time = boto.utils.parse_ts(inst.launch_time)

        # number of seconds since the instance was launched
        diff = end_time - start_time
        seconds = diff.total_seconds()

        # adjust the query period accordingly
        period = get_adjusted_period(seconds)
        result = metrics[0].query(start_time, end_time, statistic, unit, period)
        return sorted(result, key=operator.itemgetter(u'Timestamp'))
Example #9
def create_load_balancer(zones, env_id):
    config = Config(env_id)
    logger = logging.getLogger(__name__)

    if config.has_option("elb_name"):
        output = "A load balancer already exists for this environment"
        logger.warning(output)
        print(output)
        exit(1)

    region = config.get("region")
    conn = boto.ec2.elb.connect_to_region(region)
    ports = [(80, 80, "http")]

    hc_res = config.get("health_check_resource")
    hc = HealthCheck(interval=30, healthy_threshold=3, unhealthy_threshold=5, target="HTTP:80/%s" % hc_res)

    name = "lb-%s" % env_id
    elb = conn.create_load_balancer(name, zones, ports)
    elb.configure_health_check(hc)
    return name, elb
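
Note: a hedged usage sketch; once create_load_balancer() returns, instances are typically registered with the new balancer (the zone and instance ID below are hypothetical):

name, elb = create_load_balancer(zones=['us-east-1a'], env_id='42')
elb.register_instances(['i-12345678'])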
Example #10
    def __init__(self):
        if hasattr(self, '_init'):
            return
        self._init = True

        if os.getenv('GERRIT_USERNAME') and os.getenv('GERRIT_PASSWORD'):
            self.auth = HTTPDigestAuth(os.getenv('GERRIT_USERNAME'), os.getenv('GERRIT_PASSWORD'))
        else:
            c = Config()
            self.auth = HTTPDigestAuth(c.data('gerrit', 'username'), c.data('gerrit', 'password'))

        if CACHE_MODE:
            self.project_data_public = self.__load_from_file('cache/gerrit_public.json')
            self.project_data_all = self.__load_from_file('cache/gerrit_all.json')
        else:
            print('initializing all gerrit projects ...')
            self.with_auth = True
            self.project_data_all = self.__init_projects()

            print('initializing public gerrit projects ...')
            self.with_auth = False
            self.project_data_public = self.__init_projects()
Example #13
    def load_buildc_cfg(file_path, glo_var):
        cfg_file_path = file_path
        var_str       = glo_var
        if not var_str:
            c = Config.load_config(cfg_file_path)
            return c
        else:
            var_name  = var_str[:var_str.find("=")]
            var_value = var_str[var_str.find("=")+1:]
            var_str = var_name + "=\"" + var_value + "\""

            # Build a fresh module namespace, inject the variable binding,
            # then evaluate the config file inside that same namespace.
            c = imp.new_module('')
            code_obj = compile(var_str, "", 'exec')
            exec(code_obj, c.__dict__)

            f = open(cfg_file_path, 'rU')
            code_obj = compile(f.read(), "", 'exec')
            exec(code_obj, c.__dict__)
            f.close()

            return c
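
Note: the glo_var branch above evaluates a name="value" assignment and then the config file inside one fresh module namespace, so the file can reference the injected variable. A minimal self-contained sketch (file contents and names are hypothetical; types.ModuleType stands in for the deprecated imp.new_module):

import types

cfg_source = 'install_prefix = base_dir + "/opt"\n'  # pretend config-file contents
c = types.ModuleType('cfg')
exec(compile('base_dir = "/home/abc"', '', 'exec'), c.__dict__)  # inject the variable
exec(compile(cfg_source, '', 'exec'), c.__dict__)                # then evaluate the config
print(c.install_prefix)  # -> /home/abc/opt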
Example #14
#!/home/abc/.pyenv/versions/3.4.3/bin/python3.4
"""
WSGI config for qofc project.

It exposes the WSGI callable as a module-level variable named ``application``.

For more information on this file, see
https://docs.djangoproject.com/en/1.8/howto/deployment/wsgi/
"""

import os
import sys

source_dir = os.path.dirname(__file__)
if source_dir not in sys.path:
    sys.path.insert(0, source_dir)

from utils.config import Config
from django.core.wsgi import get_wsgi_application

settings_module = Config.get("config", "main", "settings_module", default="settings.production")

os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_module)

application = get_wsgi_application()
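
Note: os.environ.setdefault only assigns when the variable is not already set, so a DJANGO_SETTINGS_MODULE exported in the environment wins over the config value. A sketch (assuming the variable starts out unset):

import os

os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.production")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings.development")  # no effect
print(os.environ["DJANGO_SETTINGS_MODULE"])  # -> settings.production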
Example #15
class Test(unittest.TestCase):

    conf = None
    
    def setUp(self):
        self.conf = Config()


    def tearDown(self):
        self.conf = None
        

    def test_get_picture_vals_VISUALLY(self):        
        '''
        Since these values can be changed at any time, you can visually read
        the src/picam.config file and compare it to this output.
        '''
        print(self.conf.get_picture_vals())
        
    
    def test_get_log_vals_VISUALLY(self):        
        '''
        Since these values can be changed at any time, you can visually read
        the src/picam.config file and compare it to this output.
        '''
        print(self.conf.get_log_vals())
     
         
    def test_get_logging_vals(self):
        '''
        get_log_vals() should override this phony value with the log_dir
        configured in src/picam.config.
        '''
        self.log_dir = 'some_phony_value'
        expected_value = '../log/'

        log_vals = self.conf.get_log_vals()
        for (key, val) in log_vals:
            setattr(self, key, val)

        self.assertEqual(self.log_dir, expected_value, 'Should be the same values.')
    
    
    def test_NameError_in_get_vals(self):
        '''
        If a value other than pictures, video, or logging is used
        in __get_vals(), a NameError Exception should be thrown.
        '''
        # Note: you must use this syntax to access a private function.
        with self.assertRaises(NameError):
            self.conf._Config__get_vals('some_bad_value')


    def test_for_NO_NameError_in_get_vals(self):
        '''
        If pictures, video, or logging is used in __get_vals(),
        no NameError Exception should be thrown.
        '''
        try:
            # Note: you must use this syntax to access a private function.
            self.conf._Config__get_vals('pictures')
            self.conf._Config__get_vals('video')
            self.conf._Config__get_vals('logging')
        except NameError as ne:
            self.fail('NameError should NOT have been raised: %s' % ne)
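
Note: the _Config__get_vals calls above work because Python name-mangles double-underscore attributes to _ClassName__attr. A self-contained sketch:

class Demo(object):
    def __secret(self):  # mangled to _Demo__secret
        return 'called'

d = Demo()
print(d._Demo__secret())  # -> called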
Example #16
#!/usr/local/python34/bin/python3.4
import os
import sys

if __name__ == "__main__":

    from utils.config import Config

    settings_module = Config.get('config', 'main', 'settings_module',
                                 default='settings.production')

    os.environ.setdefault("DJANGO_SETTINGS_MODULE", settings_module)

    from django.core.management import execute_from_command_line

    execute_from_command_line(sys.argv)
Example #17
# -*- coding: utf-8 -*-
import os
# from celery.schedules import crontab
import djcelery
from utils.config import Config

BASE_DIR = os.path.dirname(os.path.dirname(__file__))


PROJECT_DIR = os.path.dirname(BASE_DIR)

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = '+#gzk#wixbq91s@#e8rz2m6qc)-=d3_i*l=6(a(6bi^%&$!alb'

MAIN_CONFIG = Config.get('config', 'main')

# SECURITY WARNING: don't run with debug turned on in production!

DEBUG = MAIN_CONFIG.get('debug', True)

TEMPLATE_DEBUG = True


ALLOWED_HOSTS = ['*']

COMMON_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
Example #18
            self.find_element(*self.re_Deliver_content).click()
            self.find_element(
                *self.re_Deliver_content_IN).send_keys(u"测试具体交接内容")  # sample handover-details text
        except Exception as e:
            print('Handover-details field not found: %s' % e)

    def reApply_Submit(self):
        '''Draft (temporary save) button'''
        try:
            self.find_element(*self.re_Submit_button).click()
        except Exception as e:
            print('Could not locate the draft button: %s' % e)


if __name__ == '__main__':
    URL = Config().get('URL')
    page = RZLoginPage(browser_type='Chrome').get(URL, maximize_window=False)
    page.userlogin()
    result = reApplyLocators(page)
    result.enter_reApply_button()
    sleep(2)
    result.reApply_click_depart()
    sleep(2)
    result.reApplydepart()
    sleep(4)
    result.reApplyposition()
    sleep(2)
    result.reApplyreason()
    sleep(2)
    result.reApply_add()
    sleep(2)
Example #19
    for inst_metric in avg_cpu.instance_metrics:
        current_id = inst_metric.instance.id
        logger.info('Current id: %s' % current_id)
        if current_id == latest_launched_instance_id:
            logger.info('Number of statistical records: %d' % len(inst_metric.metric_records()))
            if len(inst_metric.metric_records()) > 1:
                return datetime.datetime.now() > next_scale_allowed
    return False


def is_downscaling_allowed():
    return datetime.datetime.now() > next_scale_allowed


if __name__ == '__main__':
    config = Config()
    args = parse_args()
    validate_args(args, config)
    config.env_id = str(args.eid)

    logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s',
        level=logging.INFO, filename=config.get_home_dir() + 'monitor.log')
    logger = logging.getLogger(__name__)

    while True:

        # Autoscaling
        avg_cpu = aws_cw.get_avg_cpu_utilization_percentage_for_environment(config)
        percentage, num = avg_cpu.get_average_percentage()
        logger.info('Current CPU utilization for the environment %s is %.2f percent (%d instances)'
                        % (config.env_id, percentage, num))
Example #21
def main(**kwargs):

    # if kwargs['data_source'] == 'assist2009_updated':
    #     kwargs['num_concept'] = 110
    # elif kwargs['data_source'] == 'assist2015':
    #     kwargs['num_concept'] = 100
    # elif kwargs['data_source'] == 'assist2017':
    #     kwargs['num_concept'] = 102
    # elif kwargs['data_source'] == 'STATICS':
    #     kwargs['num_concept'] = 1223
    # elif kwargs['data_source'] == 'synthetic':
    #     kwargs['num_concept'] = 50
    #
    # kwargs['input_dim'] = 2 * kwargs['num_concept']
    # kwargs['output_dim'] = kwargs['num_concept']

    opt = Config()

    for k_, v_ in kwargs.items():
        setattr(opt, k_, v_)

    if opt.vis:
        vis = Visualizer(opt.env)
    else:
        vis = None

    init_loss_file(opt)
    if opt.data_source == "STATICS" or opt.data_source == "assist2017":
        opt.fold_dataset = True
    train_path, valid_path, test_path = init_file_path(opt)
    # print(opt.fold_dataset)

    # random_state = random.randint(1, 50)
    # print("random_state:", random_state)
    train_dataset = KTData(train_path,
                           fold_dataset=opt.fold_dataset,
                           q_numbers=opt.output_dim,
                           opt='None')
    valid_dataset = KTData(valid_path,
                           fold_dataset=opt.fold_dataset,
                           q_numbers=opt.output_dim,
                           opt='None')
    test_dataset = KTData(test_path,
                          fold_dataset=opt.fold_dataset,
                          q_numbers=opt.output_dim,
                          opt='None')

    print(len(train_dataset), len(valid_dataset), len(test_dataset))

    train_loader = DataLoader(train_dataset,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              drop_last=True,
                              collate_fn=myutils.collate_fn)
    valid_loader = DataLoader(valid_dataset,
                              batch_size=opt.batch_size,
                              shuffle=True,
                              num_workers=opt.num_workers,
                              drop_last=True,
                              collate_fn=myutils.collate_fn)
    test_loader = DataLoader(test_dataset,
                             batch_size=opt.batch_size,
                             shuffle=True,
                             num_workers=opt.num_workers,
                             drop_last=True,
                             collate_fn=myutils.collate_fn)

    if opt.model_name == "EKT":
        model = EKTM_dev(knowledge_length=opt.output_dim,
                         knowledge_emb_size=opt.knowledge_emb_size,
                         seq_hidden_size=opt.seq_hidden_size,
                         is_text=opt.is_text,
                         text_emb_size=opt.text_emb_size,
                         gpu=opt.gpu)

    lr = opt.lr
    last_epoch = -1

    optimizer = torch.optim.Adam(params=model.parameters(),
                                 lr=lr,
                                 weight_decay=opt.weight_decay,
                                 betas=(0.9, 0.99))
    if opt.model_path:
        map_location = lambda storage, loc: storage
        checkpoint = torch.load(opt.model_path, map_location=map_location)
        model.load_state_dict(checkpoint["model"])
        last_epoch = checkpoint["epoch"]
        lr = checkpoint["lr"]
        optimizer.load_state_dict(checkpoint["optimizer"])

    model = model.to(opt.device)

    loss_result = {}
    auc_result = {}
    best_val_auc = 0
    corr_train_auc = 0
    corr_test_auc = 0
    # START TRAIN
    for epoch in range(opt.max_epoch):
        if epoch < last_epoch:
            continue
        if opt.model_name == "EKT":
            train_loss_meter, train_auc, train_loss_list = run_ekt.train_ekt(
                opt, vis, model, train_loader, epoch, lr, optimizer)
            val_loss_meter, val_auc, val_loss_list = run_ekt.valid_ekt(
                opt, vis, model, valid_loader, epoch)
            test_loss_meter, test_auc, test_loss_list = run_ekt.valid_ekt(
                opt, vis, model, test_loader, epoch)

        loss_result["train_loss"] = train_loss_meter.value()[0]
        auc_resilt["train_auc"] = train_auc
        loss_result["val_loss"] = val_loss_meter.value()[0]
        auc_resilt["val_auc"] = val_auc
        loss_result["test_loss"] = test_loss_meter.value()[0]
        auc_resilt["test_auc"] = test_auc

        for k, v in loss_result.items():
            print("epoch:{epoch}, {k}:{v:.5f}".format(epoch=epoch, k=k, v=v))
            if opt.vis:
                vis.line(X=np.array([epoch]),
                         Y=np.array([v]),
                         win="loss",
                         opts=dict(title="loss", showlegend=True),
                         name=k,
                         update='append')
        for k, v in auc_result.items():
            print("epoch:{epoch}, {k}:{v:.5f}".format(epoch=epoch, k=k, v=v))
            if opt.vis:
                vis.line(X=np.array([epoch]),
                         Y=np.array([v]),
                         win="auc",
                         opts=dict(title="auc", showlegend=True),
                         name=k,
                         update='append')

        if val_auc > best_val_auc:
            best_val_auc = val_auc
            corr_train_auc = train_auc
            corr_test_auc = test_auc
            print("best_val_auc: {}".format(best_val_auc))

        # TODO: write this epoch's losses to file after each epoch
        # myutils.save_loss_file(opt, epoch, train_loss_list, val_loss_list, test_loss_list)

        # TODO: save model weights + optimizer state every save_every epochs
        if epoch % opt.save_every == 0:
            myutils.save_model_weight(opt, model, optimizer, epoch, lr)

        # TODO: apply lr decay
        lr = myutils.adjust_lr(opt, optimizer, epoch,
                               train_loss_meter.value()[0])

    # TODO: save the final model weights when training ends
    myutils.save_model_weight(opt, model, optimizer, epoch, lr, is_final=True)

    print(
        "DATASET: {}, knowledge_emb_size: {}, seq_hidden_size: {}, text_emb_size: {}"
        .format(opt.data_source, opt.knowledge_emb_size, opt.seq_hidden_size,
                opt.text_emb_size))
    print("best_val_auc:{}, corr_train_auc: {}, corr_test_auc: {}".format(
        best_val_auc, corr_train_auc, corr_test_auc))
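
Note: a minimal, self-contained sketch of the checkpoint round-trip that the opt.model_path branch above relies on, using a stand-in linear model (the file name is hypothetical):

import torch

model = torch.nn.Linear(4, 2)  # stand-in for EKTM_dev
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)

checkpoint = {"model": model.state_dict(),
              "optimizer": optimizer.state_dict(),
              "epoch": 0,
              "lr": 1e-3}
torch.save(checkpoint, "ekt_checkpoint.pth")

state = torch.load("ekt_checkpoint.pth", map_location="cpu")
model.load_state_dict(state["model"])
optimizer.load_state_dict(state["optimizer"])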
Example #22
class TestConfig(unittest.TestCase):
    def setUp(self):
        self.file_service = file_service
        self.src = path.join(path.dirname(__file__), '..', './testdata',
                             '.test.env')
        self.src = path.normpath(self.src)
        self.config = Config(self.file_service, self.src)

    def test_initialize_premade_config_works(self):
        src = path.join(path.dirname(__file__), '..', './testdata',
                        '.test.env')
        src = path.normpath(src)
        config = Config(file_service, src)

        self.assertFalse(config.initialized_first_time)
        self.assertIsNotNone(config.get_all())

    def test_initialize_without_previous_config_works(self):
        src = path.join(path.dirname(__file__), '..', './testdata',
                        '.env.notexists')
        src = path.normpath(src)
        config = Config(file_service, src)

        configs = config.get_all()
        self.assertTrue(config.initialized_first_time)
        self.assertIsNotNone(configs)
        self.assertEqual(configs["RES_INDEX"], 0)
        self.assertEqual(configs["DB_NAME"], "muistio")

    def test_get_setting_works(self):
        self.assertEqual(self.config.get('RES_INDEX'), "5")
        self.assertEqual(self.config.get('RES_FORMAT'), "1650x1050")
        self.assertEqual(self.config.get('DB_USERNAME'), "test")
        self.assertEqual(self.config.get('DB_PASSWORD'), "test")
        self.assertEqual(self.config.get('DB_SERVER'),
                         "ohte.bu0r9.mongodb.net")
        self.assertEqual(self.config.get('DB_NAME'), "muistio")
        self.assertIsNotNone(self.config.get('DATABASE_URI'))

    def test_get_all_works(self):
        configs = {}
        configs["RES_INDEX"] = "5"
        configs["RES_FORMAT"] = "1650x1050"
        configs["DB_USERNAME"] = "******"
        configs["DB_PASSWORD"] = "******"
        configs["DB_SERVER"] = "ohte.bu0r9.mongodb.net"
        configs["DB_NAME"] = "muistio"
        configs["DATABASE_URI"] = "mongodb+srv://test:test" \
            + "@ohte.bu0r9.mongodb.net/muistio?retryWrites=true&w=majority"

        self.assertEqual(self.config.get_all(), configs)

    def test_set_value_works(self):
        self.config.set_value('RES_INDEX', '1')
        self.config.set_value('RES_FORMAT', "auto")
        self.assertEqual(self.config.get('RES_INDEX'), "1")
        self.assertEqual(self.config.get('RES_FORMAT'), "auto")

    def test_set_empty_value_raises_value_error(self):
        with self.assertRaises(ValueError) as context:
            self.config.set_value('RES_INDEX', '')
        self.assertEqual(context.exception.args[0], "Value can not be empty")

    def test_save_works(self):
        src = path.join(path.dirname(__file__), '..', './testdata',
                        '.env.changes')
        src = path.normpath(src)
        config = Config(file_service, src)

        self.assertEqual(config.get('RES_INDEX'), 0)
        self.assertEqual(config.get('RES_FORMAT'), "")
        self.assertEqual(config.get('DB_USERNAME'), "")
        self.assertEqual(config.get('DB_PASSWORD'), "")
        self.assertEqual(config.get('DB_SERVER'), "ohte.bu0r9.mongodb.net")
        self.assertEqual(config.get('DB_NAME'), "muistio")
        self.assertIsNotNone(config.get('DATABASE_URI'))

        config.set_value('RES_INDEX', "1")
        config.set_value('RES_FORMAT', 'auto')
        config.set_value('DB_USERNAME', "username")
        config.set_value('DB_PASSWORD', "password")

        config.save()

        configs = {}
        configs["RES_INDEX"] = "1"
        configs["RES_FORMAT"] = "auto"
        configs["DB_USERNAME"] = "******"
        configs["DB_PASSWORD"] = "******"
        configs["DB_SERVER"] = "ohte.bu0r9.mongodb.net"
        configs["DB_NAME"] = "muistio"
        configs["DATABASE_URI"] = "mongodb+srv://username:password" \
            + "@ohte.bu0r9.mongodb.net/muistio?retryWrites=true&w=majority"

        self.assertEqual(config.get_all(), configs)

        remove(src)
Example #24
consoleHandler = logging.StreamHandler(sys.stdout)
consoleHandler.setFormatter(logFormatter)
rootLogger.addHandler(consoleHandler)

# File logger
fileHandler = RotatingFileHandler(os.path.join(
    os.path.dirname(os.path.realpath(sys.argv[0])), 'activity.log'),
                                  maxBytes=1024 * 1024 * 5,
                                  backupCount=5,
                                  encoding='utf-8')

fileHandler.setFormatter(logFormatter)
rootLogger.addHandler(fileHandler)

# Config
cfg = Config(rootLogger)

# Logger
log = rootLogger.getChild("rtorrent_orphan_cleanup")

# Rtorrent
rtorrent = None

############################################################
# FUNCTIONS
############################################################


def remove_path(file_path, auto_remove=False):
    if not auto_remove:
        log.warning("Do you want to remove: %s (y/n)", file_path)
Example #25
 def __init__(self, **kwargs):
     self.__dict__.update(self._defaults)
     self.class_names = self._get_class()
     self.config = Config()
     self.generate()
     self.bbox_util = BBoxUtility()
Example #26
# %%
import pandas as pd
from pandas.api.types import union_categoricals
import numpy as np
from utils.config import Config

#%%
c = Config()
c.validate_files()

# %%
df = pd.read_csv(c.uid_300K)
# %%
uid1 = df.uid
# %%
print(f"size of uid1: {uid1.size:,}")
# %%
df2 = pd.read_csv(c.uid_18M)
# %%
uid2 = df2.uid

# %%
print(f"size of uid2: {uid2.size:,}")

#%%
# merge cats1 and cats2 series

uid = pd.merge(uid2, uid1, how='outer', indicator='source')

#%%
# cats.groupby('source').nunique()
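
Note: indicator='source' adds a categorical column recording each row's origin (left_only, right_only, or both), which is what the commented-out groupby would count. A self-contained sketch:

import pandas as pd

left = pd.DataFrame({'uid': [1, 2]})
right = pd.DataFrame({'uid': [2, 3]})
print(pd.merge(left, right, how='outer', indicator='source'))
#    uid      source
# 0    1   left_only
# 1    2        both
# 2    3  right_only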
Example #27
class GitUtils(object):
    """
    Utility class for Git helper commands
    """
    def __init__(self):
        """instantiate default variables to be used in the class
        :rtype : object
        """
        # logging.basicConfig()  // uncomment for debugging

        this_dir = os.path.dirname(os.path.abspath(__file__))

        # a list of directories that we want to run git commands in
        self.dir = []
        self.config = Config()
        # default directory, if none was passed in
        self.default_dir = self.config.default_dir
        self.git_server = self.config.get_git_server()

        self.gpull_local_location = os.path.join(this_dir, os.pardir, os.pardir, 'gpull_local.py')

        # whether to force pull/checkout or not
        self.force = False

        # what custom branch to pull/checkout
        self.branch = None

        # the username to use for ssh
        self.ssh_user = None

        # ssh password
        self.pw = None

        self.email_to = self.config.email_settings['email_to']

        # recurse through all directories if True, otherwise use git_repos_config instead
        self.all_dirs = False

        # dict of server aliases and properties
        self.server_aliases = self.config.servers

        # list of group aliases, eg. test-all, test-lex, stg-cr
        self.group_aliases = server_config.get_group_aliases(self.server_aliases)

        # dict of open ssh connections, so we can recycle the ones we already have open
        self.connections = {}

        # key to current ssh connection in the connections list
        self.current_conn_key = None

    def remote_pull(self, paths, branch, force, servers, remote_user, pw, all_dirs=False, remote_path=None):
        """
        Do a git pull on remote servers
        :param paths: list of paths to update
        :param branch: string of branch to pull
        :param force: bool if true, then do a force checkout (overwriting uncommitted changes)
        :param servers: list of servers to update
        :param remote_user: string of ssh user
        :param pw: string ssh password
        :param all_dirs:
        :param remote_path
        :return:
        """

        if remote_path is not None:
            self.gpull_local_location = remote_path

        if paths is not None:
            # loop through paths
            for path in paths:
                # if no slash exists, then append the given directory to the default path
                if '/' not in path:
                    self.dir.append("".join(self.default_dir) + '/' + path)
                # otherwise, just append the path to the list of directories
                else:
                    self.dir.append(path)
        else:
            # if no path was passed in, just use the default directory
            self.dir.append(self.default_dir)

        if force is True or force is None:
            self.force = True

        if all_dirs is True or all_dirs is None:
            self.all_dirs = True

        if branch is not None:
            self.branch = branch

        settings = user_settings.UserSettings()
        if remote_user is not None:
            settings.save_ssh_user(remote_user)
            self.ssh_user = remote_user
        else:
            # attempt to get user from db.
            self.ssh_user = settings.get_ssh_user()

        self.pw = pw

        self.update_servers(servers)

    def update_servers(self, servers):
        """
        loop through servers, and run commands on them.
        :param servers: list of servers
        :return: void
        """

        if servers is None:
            # run locally
            self.update_server(None, 'localhost')
        else:
            for srv in servers:
                # if the server is an actual server and not a group of servers:
                if srv in self.server_aliases:
                    for url in self.server_aliases[srv]['url']:
                        # grab configuration from the server aliases dictionary, and run the script on the server
                        self.update_server(srv, url)
                # if this is a server group alias
                elif srv in self.group_aliases:
                    # we need to run through each server individually
                    for srv_alias in self.group_aliases[srv]['servers']:
                        for url in self.server_aliases[srv_alias]['url']:
                            # legacy servers require direct ssh connection
                            self.update_server(
                                srv_alias,
                                url,
                                self.server_aliases[srv_alias]['git_user'])

            # at the end of the loop, close all connection instances
            for connection in self.connections.values():
                connection.close()

    def update_server(self, ssh_alias=None, url=None, git_user='******'):
        """
        Update Individual Server
        :param ssh_alias:
        :param url:
        :param git_user:
        :return:
        """
        # run this file on the desired server.
        command = "python -u " + self.gpull_local_location

        if ssh_alias is not None:
            # start a remote connection to the server
            command += " -u {} -e {} -n '{}' ".format(git_user, self.email_to, self.ssh_user)
            if self.start_ssh(url) is False:
                # failed connection, so don't continue updating directories
                return False

        # add path:
        command += " -p {}".format(' '.join(self.dir))

        # run through optional commands (force, branch)
        if self.branch is not None:
            command += " -b {}".format(self.branch)

        if self.force:
            command += " -f "

        if self.all_dirs:
            command += " -a "

        out(0, green("running git updates on " + url))

        out(0, self.exec_shell(command))

    def git_merge_all(self, from_branch, to_branch, working_path='/var/release'):
        """
        Merge all Git Repositories from one branch into another.
        :param from_branch: What branch to merge from
        :param to_branch: What branch to merge into
        :param working_path:
        :return:
        """
        if not os.path.exists(working_path):
            # if path doesn't exist, create it:
            os.mkdir(working_path)

        os.chdir(working_path)

        for repo in self.config.repositories:
            os.chdir(working_path)
            out(1, blue("\n------- REPO: " + repo + " -------"))
            # see if the repo exists
            path = working_path+'/'+repo

            output = ''
            try:
                if not os.path.exists(path):
                    output += self.exec_shell('git clone '+self.git_server+'/'+repo+'.git ' + path)

                    if 'Access denied.' in output:
                        out(2, yellow('skipped'))
                        continue

                os.chdir(path)

                output += self.exec_shell('git reset --hard HEAD')
                output += self.exec_shell('git checkout --force {}'.format(from_branch))
                output += self.exec_shell('git pull')
                output += self.exec_shell('git checkout --force {}'.format(to_branch))
                output += self.exec_shell('git pull')
                output += self.exec_shell('git merge {}'.format(from_branch))
                output += self.exec_shell('git push origin {}'.format(to_branch))

                for line in output.splitlines(True):
                    if line.startswith('error') or line.startswith('CONFLICT'):
                        out(2, red(line))
                    else:
                        out(2, green(line))

            except Exception as e:
                out(2, red('Error: '))
                out(2, red(output))
                out(2, red(e))
                return False
        return output

    def start_ssh(self, url):
        """
        start an ssh connection
        :param url:
        :return:
        """
        # use current user if none was passed in.
        if self.ssh_user is None:
            self.ssh_user = getpass.getuser()

        # if we haven't already started this connection, start it
        if url not in self.connections:
            try:
                # paramiko.util.log_to_file("paramiko.log")
                ssh = paramiko.SSHClient()
                ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
                ssh.allow_agent = False
                ssh.connect(url, username=self.ssh_user, password=self.pw)

            except Exception as e:
                out(0, red("SSH connection to {} failed: ".format(url)))
                print(e)
                return False
            # add this connection to the list of open connections
            self.connections[url] = ssh

        # set ssh_alias as the current connection key to be used in exec_shell
        self.current_conn_key = url

        return True

    def exec_shell(self, command):
        """
        Execute a shell command and get the output.
        :param command: script command
        :return: string | False
        """
        if self.current_conn_key:
            ssh = self.connections[self.current_conn_key]
            encoded = shlex.quote(self.pw)  # shlex.quote; the pipes module is deprecated
            sudo_cmd = "echo {pw} | sudo -S ".format(pw=encoded)

            stdin, stdout, stderr = ssh.exec_command(sudo_cmd + command, get_pty=True)
            out(0, stdout.read())

            if stderr:
                for line in stderr.readlines():
                    line = line.strip()
                    # ignore sudo password prompts
                    if '[sudo] password for' not in line:
                        out(0, line)
        else:
            try:
                # try to run the process, or return an error
                process = subprocess.Popen(shlex.split(command), bufsize=0,
                                           stdout=subprocess.PIPE, stderr=subprocess.STDOUT)

                stdout, stderr = process.communicate()

                if stdout is not None:
                    return stdout
            except subprocess.CalledProcessError as e:
                print("Could not finish your request: " + e.output.decode('UTF-8'))
                return False

        return ''
Example #28
# coding=utf-8
import json
import unittest
from utils.client import HttpClient
from utils.config import Config
from parameterized import parameterized
from utils.log import logger

source_dict = dict(city='深圳',  # Shenzhen
                   key=Config().get('weather_key', index=1),
                   error_code=0)


class TestWeather(unittest.TestCase):
    URL = Config().get('juhe_simple_weather')

    def setUp(self):
        self.client = HttpClient(url=self.URL, method='GET')

    # Parameterized test of the API; note the format of the injected parameters
    @parameterized.expand([['深圳', 'de5148ef8cabaf81c0d1c83c84098379', 1]])
    def test_response(self, city, key, error_code):
        params = dict(city=city, key=key)
        response = self.client.send(params=params)
        # logger.info('Response: %s' % (json.dumps(response.json(), ensure_ascii=False, indent=4)))
        cur_error_code = response.json()['error_code']
        self.assertEqual(error_code, cur_error_code)


if __name__ == '__main__':
    # print('request params', params)
Example #29
class RaspiCam(object):

    
    def __init__(self):
        '''
        Constructor
        
        See src/picam.config file for all options.
        More documentation at:
        http://www.raspberrypi.org/wp-content/uploads/2013/07/RaspiCam-Documentation.pdf
        
        These default values can be changed or added to, in the picam.config file.
        '''
        
        # Default values:
        
        # Exposure
        self.photo_ex  = None        
        self.photo_day_ex = 'auto'
        self.photo_night_ex = 'auto'
        
        # Automatic White Balance
        self.photo_awb = None
        self.photo_day_awb = 'auto'
        self.photo_night_awb = 'auto'
        
        # EV level
        self.photo_ev = 0
        
        # Photo dimensions and rotation
        self.photo_width  = 640
        self.photo_height = 480
        self.photo_rotate = 0
        
        self.photo_interval = 60 # Interval between photos (seconds)
        
        self.photo_dir = 'pics'
        self.photo_name = 'photo'
        
        self.dawn = 7
        self.dusk = 20
        
        #Logging
        self.log_dir = '../log/'
                
                
        # Config object used to get values from the config file.
        self.conf = Config()
        
        # Setup logging.
        self.log = self.create_logger()
    
    
    def create_logger(self):
        '''
        Creates and returns the logging object used throughout the application.
        '''
        myLogger = Logger()
        self.set_log_vars_from_config()
        myLogger.LOG_DIR = self.log_dir
        myLogger.LOG_FILENAME = 'RaspiCam.out'
        mylog = myLogger.createLogger(name='RaspiCam')
        
        return mylog
    
    
    def set_log_vars_from_config(self):
        '''
        This method will set the log file directory, from:
           src/picam.config
           
        If the config file does NOT exist, then the default values in the 
        constructor will be used.
        '''        
        log_vals = self.conf.get_log_vals()
        for (key, val) in log_vals:
            setattr(self, key, val)
     
        
    def take_pics(self):
        '''
        This method uses the linux raspistill application to take pictures.
        At night, the exposure is set to 'night'.
        Pictures are taken every minute (default).  This time can be changed in
        the picam.config file.
        '''
        # Lets start taking photos!
        try:                    
            while True:
                self.set_pic_vars_from_config()
                filename = self.create_photo_filename_and_dir()                
                
                # Set Exposure and Automatic White Balance for day or night time.
                self.set_ex_and_awb()
                
                # This command takes a single picture.
                cmd = 'raspistill -o ' + filename \
                                       + ' -t 1000 ' \
                                       + ' -ex ' + self.photo_ex \
                                       + ' -awb ' + self.photo_awb \
                                       + ' -ev ' + str(self.photo_ev) \
                                       + ' -w ' + str(self.photo_width) \
                                       + ' -h ' + str(self.photo_height) \
                                       + ' -rot ' + str(self.photo_rotate)
                subprocess.call(cmd, shell=True)
                
                # Sleep until it is time for the next picture.
                time.sleep(self.photo_interval)
        
        except Exception as e:
            #print(e)
            self.log.error(e)
        except KeyboardInterrupt:
            # User quit
            #print("\nGoodbye!")
            self.log.info("Goodbye!")
    
    
    def set_pic_vars_from_config(self):
        '''
        This method will set attributes for pictures, from:
           src/picam.config
           
        If the config file does NOT exist, then the default values in the 
        constructor will be used.
        '''        
        pic_vals = self.conf.get_picture_vals()
        for (key, val) in pic_vals:
            setattr(self, key, val)
            
            
    def create_photo_filename_and_dir(self):
        '''
        This method will create a base directory using the photo_dir config 
        variable.
        A sub directory will be created in the format of Year_Month_Day.
        The filename consists of the 'name' argument passed into the method 
        and a timestamp.
        
        Ex. ./pics/2013_08_30/photo_2013-08-30_09-59-09.501599.jpg
        '''
        # Create directory if it doesn't already exist.
        directory = self.photo_dir + '/' + strftime('%Y_%m_%d')
        self.create_dir(directory)
        
        filename = directory + '/' + self.photo_name + '_' + self.current_timestamp() + '.jpg'
        
        return filename
        
        
    def create_dir(self, directory):
        '''
        This method will create a directory if it doesn't already exist.
        '''
        try:
            # os.makedirs throws an exception if the directory already exists.
            os.makedirs(directory, 0o755)
        except Exception:
            # No need to print out error message every time a file is created.
            pass
            
            
    def set_ex_and_awb(self):
        '''
        This method changes the Exposure and Automatic White Balance for
        day or night time use.
        '''
        try:
            if self.is_night_time(dawn=self.dawn, dusk=self.dusk):
                self.photo_ex = self.photo_night_ex
                self.photo_awb = self.photo_night_awb
            else:
                self.photo_ex = self.photo_day_ex
                self.photo_awb = self.photo_day_awb      
        except ValueError as ve:
            print('{} Default values will be used.'.format(ve))
            self.log.error('{} Default values will be used.'.format(ve))
        
        
    def current_timestamp(self):
        '''
        Returns a timestamp in the following format:
            year-month-day_hour-min-sec
            2013-08-25_09-31-59.126116
        '''
        # Remove spaces and colons.
        now = str(datetime.now()).replace(' ', '_')
        return now.replace(':', '-')

        
    def is_night_time(self, dawn=7, dusk=20):
        '''
        Returns True if night time else False.
        Night time is considered to be the hours between dawn and dusk. 
        We are making the assumption that dusk or dawn are never the same.
        
        The time range is [0:23].
        Ex. 8:00 AM is 8 while 8:00 PM is 20.
        
        :param dawn:  Defaulted to 7:00 AM.
        :param dusk:  Defaulted to 8:00 PM.
        '''
        # dawn and dusk cannot be the same value.
        if dawn == dusk:
            raise ValueError('dawn and dusk cannot be the same value')
        
        is_night = True        
        now = time.localtime()
        the_hour = now.tm_hour
            
        # Day time is when the_hour is NOT between dusk and dawn.
        
        # dusk before midnight and dawn after midnight
        if (12 <= dusk <= 23) and (0 <= dawn <= 12):
            if dawn < the_hour < dusk:
                is_night = False
        
        # dusk before midnight and dawn before midnight
        elif (12 <= dusk <= 23) and (12 <= dawn <= 23):
            if the_hour < dusk:
                is_night = False
        
        # dusk after midnight and dawn after midnight
        elif (0 <= dusk <= 12) and (0 <= dawn <= 12):
            if the_hour < dusk:
                is_night = False
    
        return is_night 
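
Note: for the common case handled by the first branch above (dawn in the morning, dusk in the evening), the whole check collapses to one chained comparison. A minimal sketch covering only that case:

def is_night_time_simple(the_hour, dawn=7, dusk=20):
    # night is any hour outside the open interval (dawn, dusk)
    if dawn == dusk:
        raise ValueError('dawn and dusk cannot be the same value')
    return not (dawn < the_hour < dusk)

assert is_night_time_simple(12) is False  # midday
assert is_night_time_simple(23) is True   # late evening
assert is_night_time_simple(3) is True    # small hours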
Example #30
class test_heavy(unittest.TestCase):
    Config.set_env(Config.ENV_TEST)

    #sorry https://aiven.io I made a lot of requests!

    def test_heavy(self):
        prod, cons = app.run(Config.K_MONITOR_TEST_TOPIC,
                             Config.PS_DATABASE_NAME,
                             Config.PS_TEST_WEBSITE_TABLE_NAME,
                             "tests/t_monitor_heavy_test.yml")

        interval = File.read_time_interval("tests/t_monitor_heavy_test.yml")

        time.sleep(interval - 1)

        app.stop_monitor(prod, cons)

        admin_client = KafkaAdminClient(
            bootstrap_servers=[Config.K_HOST + ':' + Config.K_PORT],
            security_protocol=Config.K_SECURITY_PROTOCOL,
            ssl_cafile=Config.K_SSL_CAT_FILE,
            ssl_certfile=Config.K_SSL_CERT_FILE,
            ssl_keyfile=Config.K_SSL_KEY_FILE)

        admin_client.delete_topics([Config.K_MONITOR_TEST_TOPIC])

        self.assertEqual(prod.get_message_count(), cons.get_message_count())

    def test_validate_number_in_interval(self):
        prod, cons = app.run(Config.K_MONITOR_TEST_TOPIC,
                             Config.PS_DATABASE_NAME,
                             Config.PS_TEST_WEBSITE_TABLE_NAME,
                             "tests/t_monitor_heavy_test.yml")

        interval = File.read_time_interval("tests/t_monitor_heavy_test.yml")

        time.sleep(interval - 1)

        app.stop_monitor(prod, cons)

        admin_client = KafkaAdminClient(
            bootstrap_servers=[Config.K_HOST + ':' + Config.K_PORT],
            security_protocol=Config.K_SECURITY_PROTOCOL,
            ssl_cafile=Config.K_SSL_CAT_FILE,
            ssl_certfile=Config.K_SSL_CERT_FILE,
            ssl_keyfile=Config.K_SSL_KEY_FILE)

        admin_client.delete_topics([Config.K_MONITOR_TEST_TOPIC])

        monitors = File.read_monitors("tests/t_monitor_heavy_test.yml")

        # the number of messages sent should equal the total URL count for 1 cycle
        self.assertEqual(prod.get_message_count(), len(monitors))

    def test_validate_number_in_interval_double(self):
        prod, cons = app.run(Config.K_MONITOR_TEST_TOPIC,
                             Config.PS_DATABASE_NAME,
                             Config.PS_TEST_WEBSITE_TABLE_NAME,
                             "tests/t_monitor_heavy_test.yml")

        interval = File.read_time_interval("tests/t_monitor_heavy_test.yml")

        time.sleep(interval * 2)

        app.stop_monitor(prod, cons)

        admin_client = KafkaAdminClient(
            bootstrap_servers=[Config.K_HOST + ':' + Config.K_PORT],
            security_protocol=Config.K_SECURITY_PROTOCOL,
            ssl_cafile=Config.K_SSL_CAT_FILE,
            ssl_certfile=Config.K_SSL_CERT_FILE,
            ssl_keyfile=Config.K_SSL_KEY_FILE)

        admin_client.delete_topics([Config.K_MONITOR_TEST_TOPIC])

        monitors = File.read_monitors("tests/t_monitor_heavy_test.yml")

        # over 2 cycles, the number of messages sent should be double the URL count
        self.assertEqual(prod.get_message_count(), len(monitors) * 2)
Example #31
class Cosmos(object):
    """A baseball cosmos."""

    def __init__(self, debug=True):
        """Initialize a Cosmos object."""
        self.debug = debug
        # Determine and display an official Baseball Cosmos ID :)
        self.id = self._init_cosmos_id()
        if debug:
            print("Preparing {self}...".format(self=self))
        # Load the config parameters
        self.config = Config()
        # Prepare Thoughts class
        Thoughts.thought_prototypes = [
            ThoughtPrototype(tag=spec[0], likelihood=spec[1], preconditions=spec[2], effects=spec[3])
            for spec in self.config.thought_prototype_specifications
        ]
        # Load the city data (specifies data about all cities that will eventually
        # be established in this simulation)
        self.city_data = CityData()
        # Load the NLG module for this game instance, etc.
        self.productionist = Productionist(game=self)
        self.errors = []
        self.problems = []
        # This gets incremented each time a new person is born/generated,
        # which affords a persistent ID for each person
        self.current_person_id = 0
        self.current_place_id = 0
        # Determine whether baseball curses are real in this baseball cosmos
        self.curses_are_real = random.random() < self.config.chance_baseball_curses_are_real
        # Prepare attributes relating to time
        # self.year = self.config.year_worldgen_begins
        # self.true_year = self.config.year_worldgen_begins  # True year never gets changed during retconning
        self.ordinal_date = datetime.date(*self.config.date_worldgen_begins).toordinal()  # N days since 01-01-0001
        self.year = datetime.date(*self.config.date_worldgen_begins).year
        self.true_year = self.year  # True year never gets changed during retconning
        self.month = datetime.date(*self.config.date_worldgen_begins).month
        self.day = datetime.date(*self.config.date_worldgen_begins).day
        self.time_of_day = 'day'
        self.date = self.get_date()
        # Prepare a listing of all in-game events, which will facilitate debugging later
        self.events = []
        # A game's event number allows the precise ordering of events that
        # happened on the same timestep -- every time an event happens, it requests an
        # event number from Game.assign_event_number(), which also increments the running counter
        self.event_number = -1
        # Prepare a listing of all people born on each day -- this is used to
        # age people on their birthdays; we start with (2, 29) initialized because
        # we need to perform a check every March 1 to ensure that all leap-year babies
        # celebrate their birthday that day on non-leap years
        self.birthdays = {(2, 29): set()}
        # Prepare a number that will hold a single random number that is generated daily -- this
        # facilitates certain things that should be determined randomly but remain constant across
        # a timestep, e.g., whether a person locked their door before leaving home
        self.random_number_this_timestep = random.random()
        # self.establish_setting()
        # self._sim_and_save_a_week_of_timesteps()
        self.weather = None
        # Prepare geographic listings
        self.countries = []
        self.states = []
        self.cities = []
        # Instantiate a first country
        Country(name='United States of America', cosmos=self)
        # Prepare baseball-centric attributes
        self.baseball_classifications = [
            # TODO MAKE THIS BOTTOM-UP; HAVE AGENTS NEGOTIATE TO COMPOSE/MODIFY CLASSES
            Class(cosmos=self, level='AAA'),
            InformalPlay(cosmos=self)
        ]
        self.leagues = []  # Leagues based here

    @staticmethod
    def _init_cosmos_id():
        """Randomly determine an eight-digit cosmos ID."""
        return "".join(str(int(random.random()*10)) for _ in xrange(8))

    def __str__(self):
        """Return string representation."""
        return "Baseball Cosmos {cosmos_id}".format(cosmos_id=self.id)

    @property
    def people(self):
        """Return a list of all people living in the game world."""
        return list(self.residents)

    @property
    def residents(self):
        """Return a list of all people living in the game world."""
        residents = []
        for country in self.countries:
            residents += list(country.residents)
        return residents

    @property
    def random_person(self):
        """Return a random person living in this game world."""
        random_country = random.choice(self.countries)
        return random.choice(list(random_country.residents))

    @property
    def major_league_team_nicknames(self):
        major_nicknames = set()
        for league in self.leagues:
            for team in league.teams:
                major_nicknames.add(team.nickname)
        return major_nicknames

    def assign_event_number(self, new_event):
        """Assign an event number to some event, to allow for precise ordering of events that happened same timestep.

        Also add the event to a listing of all in-game events; this facilitates debugging.
        """
        self.events.append(new_event)
        self.event_number += 1
        return self.event_number

    @staticmethod
    def get_random_day_of_year(year):
        """Return a randomly chosen day in the given year."""
        ordinal_date_on_jan_1_of_this_year = datetime.date(year, 1, 1).toordinal()
        # randint(0, 365) would spill into January 1 of the next year on
        # non-leap years, so use the year's actual length
        days_in_year = (
            datetime.date(year + 1, 1, 1).toordinal() - ordinal_date_on_jan_1_of_this_year
        )
        ordinal_date = ordinal_date_on_jan_1_of_this_year + random.randint(0, days_in_year - 1)
        datetime_object = datetime.date.fromordinal(ordinal_date)
        month, day = datetime_object.month, datetime_object.day
        return month, day, ordinal_date

    def get_date(self, ordinal_date=None):
        """Return a pretty-printed date for ordinal date."""
        if not ordinal_date:
            ordinal_date = self.ordinal_date
        year = datetime.date.fromordinal(ordinal_date).year
        month = datetime.date.fromordinal(ordinal_date).month
        day = datetime.date.fromordinal(ordinal_date).day
        month_ordinals_to_names = {
            1: "January", 2: "February", 3: "March", 4: "April", 5: "May", 6: "June", 7: "July",
            8: "August", 9: "September", 10: "October", 11: "November", 12: "December"
        }
        date = "{} of {} {}, {}".format(
            # Note: for retconning, the time of day will always be whatever the actual time of day
            # is at the beginning of the true simulation ("day", I assume), but this shouldn't matter
            self.time_of_day.title(), month_ordinals_to_names[month], day, year
        )
        return date

    def progress(self, until=None):
        """Progress the cosmos until the specified date."""
        if not until:  # Progress one day
            until = self.ordinal_date + 1
        else:
            if type(until) is int:  # Just a year was passed
                until = datetime.date(year=until, month=1, day=1).toordinal()
            else:
                until = datetime.date(*until).toordinal()
        number_of_timesteps_until_then = max(1, (until-self.ordinal_date)*2)
        # JOR: TRYING WEIRD MEMORY TRICK HERE
        while number_of_timesteps_until_then > 1000:
            self._advance_n_timesteps(n_timesteps=1000)
            number_of_timesteps_until_then -= 1000
        self._advance_n_timesteps(number_of_timesteps_until_then)

    def _advance_n_timesteps(self, n_timesteps=1):
        """Simulate the passing of a chunk of time at a lower fidelity than normal."""
        for i in xrange(n_timesteps):
            self._advance_time()
            for l in self.leagues:
                l.operate()
            for city in self.cities:
                if random.random() < 0.03:
                    city.manipulate_population()
                if random.random() < CHANCE_OF_A_DAY_BEING_SIMULATED:
                    self._simulate_a_timestep_in_a_city(city)

    def _advance_time(self):
        """Advance time of day and date, if it's a new day."""
        self.time_of_day = "night" if self.time_of_day == "day" else "day"
        self.weather = random.choice(['good', 'bad'])
        if self.time_of_day == "day":
            self.ordinal_date += 1
            new_date_tuple = datetime.date.fromordinal(self.ordinal_date)
            if new_date_tuple.year != self.year:
                # Happy New Year
                self.true_year = new_date_tuple.year
                self.year = new_date_tuple.year
                if self.debug:
                    print "Updating each city's nearest cities..."
                for city in self.cities:
                    city.set_nearest_cities()
            self.month = new_date_tuple.month
            self.day = new_date_tuple.day
            self.date = self.get_date()
            if self.debug:
                print self.date
            self._handle_any_birthdays_today()
            self._handle_any_city_establishments_today()
        else:  # Nighttime
            self.date = self.get_date()
        # Lastly, set a new random number for this timestep
        self.random_number_this_timestep = random.random()

    def _handle_any_birthdays_today(self):
        """Age any living character whose birthday is today."""
        if (self.month, self.day) not in self.birthdays:
            self.birthdays[(self.month, self.day)] = set()
        for person in self.birthdays[(self.month, self.day)]:
            if person.alive:
                person.grow_older()
        # Don't forget leap-year babies, who celebrate on March 1 in non-leap years
        if (self.month, self.day) == (3, 1):
            for person in self.birthdays[(2, 29)]:
                if person.present:
                    person.grow_older()

    def _handle_any_city_establishments_today(self):
        """Establish any cities that have been prescribed to be established today."""
        if self.ordinal_date in self.city_data.ordinal_dates_of_city_establishment:
            for city_specification in self.city_data.ordinal_dates_of_city_establishment.get(self.ordinal_date, set()):
                City(cosmos=self, specification=city_specification)

    def _simulate_a_timestep_in_a_city(self, city):
        """Simulate a timestep in the given city."""
        if city.cosmos.debug:
            print "Simulating a {} in {}...".format(self.time_of_day, city.full_name)
        # Simulate birth, death, retirement, college, and moving out of parents
        for person in list(city.residents):
            if city.cosmos.debug:
                print "\t...simulating births, deaths, retirements, and moves..."
            if person.pregnant and self.ordinal_date >= person.due_date:
                person.give_birth()
            # The following checks form a single if/elif chain; in the original
            # code it was broken up by debug prints, which silently attached the
            # elif branches to the debug checks instead
            if person.age > max(65, random.random() * 100):
                person.die(cause_of_death="Natural causes")
            elif person.occupation and person.age > max(65, random.random() * 100):
                person.retire()
            elif person.adult and not person.occupation:
                if person.age > 22:
                    person.college_graduate = True
            elif person.age > 18 and person not in person.home.owners:
                person.move_out_of_parents_home()
        days_since_last_simulated_day = self.ordinal_date - city.last_simulated_day
        # Reset all Relationship interacted_this_timestep attributes
        for person in list(city.residents):
            for other_person in person.relationships:
                person.relationships[other_person].interacted_this_timestep = False
        # Have people go to the location they will be at this timestep
        if city.cosmos.debug:
            print "\t...enacting NPC routines..."
        for person in list(city.residents):
            person.routine.enact()
        # Simulate sex  TODO sex outside of marriage
        if city.cosmos.debug:
            print "\t...simulating sex..."
        for person in list(city.residents):
            if person.marriage and person.spouse.home is person.home:
                chance_they_are_trying_to_conceive_this_year = (
                    self.config.function_to_determine_chance_married_couple_are_trying_to_conceive(
                        n_kids=len(person.marriage.children_produced)
                    )
                )
                chance_they_are_trying_to_conceive_this_year /= CHANCE_OF_A_DAY_BEING_SIMULATED*365
                if random.random() < chance_they_are_trying_to_conceive_this_year:
                    person.have_sex(partner=person.spouse, protection=False)
        # Have people observe their surroundings, which will cause knowledge to
        # build up, and have them socialize with other people also at that location --
        # this will cause relationships to form/progress and knowledge to propagate
        if city.cosmos.debug:
            print "\t...simulating social interactions..."
        for person in list(city.residents):
            if person.age > 3:
                # person.observe()
                person.socialize(
                    missing_timesteps_to_account_for=days_since_last_simulated_day*2,
                    propagate_knowledge=False
                )
        city.last_simulated_day = self.ordinal_date

    def find_by_hex(self, hex_value):
        """Return person whose ID in memory has the given hex value."""
        int_of_hex = int(hex_value, 16)
        try:
            person = next(
                # p for p in self.city.residents | self.city.deceased | self.city.departed if
                p for p in self.people if
                id(p) == int_of_hex
            )
            return person
        except StopIteration:
            raise Exception('There is no one with that hex ID')
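
A minimal driver sketch for the class above (hypothetical code, assuming this module's imports and config files are in place; progress() accepts either an int year or a (year, month, day) tuple):

# Hypothetical usage sketch; not part of the original module.
cosmos = Cosmos(debug=False)
cosmos.progress(until=1950)           # an int is interpreted as a year
cosmos.progress(until=(1950, 7, 4))   # a tuple as (year, month, day)
print cosmos.date                     # pretty-printed current date
print "Population:", len(cosmos.people)
someone = cosmos.random_person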
Example #32
            hist += confusion_matrix(pred.flatten(), label.flatten(),
                                     cfg.class_num)
            ious = per_class_iou(hist) * 100
            miou = np.nanmean(ious)
            print(f'\rBatch: {i + 1}/{total_batch}, mIOU: {miou:.2f}', end='')

    print('\nPer class iou:')
    for i, iou in enumerate(ious):
        print(f'{i}: {iou:.2f}')

    return miou


if __name__ == '__main__':
    args = parser.parse_args()
    cfg = Config(args=args.__dict__, mode='Val')
    cfg.show_config()

    model = DLASeg(cfg).cuda()
    model.load_state_dict(torch.load(cfg.trained_model), strict=True)
    model.eval()
    if cfg.onnx:
        net_in = torch.randn(4, 3, 128, 128, requires_grad=True).cuda()
        torch_out = torch.onnx.export(
            model,  # model being run
            net_in,  # model input (or a tuple for multiple inputs)
            "dla.onnx",
            verbose=True,
            # store the trained parameter weights inside the model file
            training=False,
            do_constant_folding=True,
Example #33
def disconnect():
    program = '"' + Config(
    ).sslvpn_folder + '/FortiSSLVPNclient.exe" disconnect'
    subprocess.Popen(program)
    logging.info('sslvpn: connection terminated')
    time.sleep(5)
Example #34
    for _i, test_batch in enumerate(config.reader.build_test_data_loader()):
        batch_q = test_batch['question_ids'].to(config.device)
        batch_a = test_batch['answer_ids'].to(config.device)
        score = model(batch_q, batch_a).detach().tolist()
        scores = scores + score

    map_val, mrr_val, prec_1 = config.reader.evaluate(scores,
                                                      mode='test',
                                                      acc=False)
    print('test_map: {:.4f}, test_mrr: {:.4f}, test_prec@1: {:.4f}'.format(
        map_val, mrr_val, prec_1))


if __name__ == '__main__':

    config = Config()
    config_file = 'configs/qa.ini'  # define dataset in the config
    config.parse_config(config_file)
    print('Dataset_name = {}'.format(config.dataset_name))
    tokenizer = build_tokenizer('data\\QA\\trec')
    data_collator = DataCollator(config, tokenizer)
    reader = DataReader(config, tokenizer, data_collator)
    config.reader = reader

    if torch.cuda.is_available():
        config.device = torch.device('cuda')
        torch.cuda.manual_seed(config.seed)
    else:
        config.device = torch.device('cpu')
        torch.manual_seed(config.seed)
Example #35
        help='Call to utility')
    parser.add_argument('-i', '--id', type=str, help='Instance ID')
    parser.add_argument('-e', '--eid', type=int, help='Environment ID')
    return parser.parse_args()


def test():
    config = Config('100')
    instance_id = 'i-3029b47b'
    instance = aws_ec2.get_instance(config, instance_id)
    pprint(vars(instance))


if __name__ == '__main__':
    args = parse_args()
    config = Config()
    logger = logging.getLogger(__name__)
    logger.setLevel(logging.DEBUG)
    formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
    handler = logging.FileHandler(config.get_home_dir() + "utils.log")
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    if args.operation == 'get_log':
        if not args.id:
            print 'Need to specify instance ID to retrieve the log'
        else:
            print aws_utils.get_console_log(args.id)
    if args.operation == 'test':
        test()
Example #36
import selenium
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from utils.config import Config

hturl = Config().get('HTURL')


class BasePage(object):
    def __init__(self):
        self.driver = selenium.webdriver.Chrome()
        # # fp = selenium.webdriver.FirefoxProfile(r"C:\Users\Administrator\AppData\Roaming\Mozilla\Firefox\Profiles\hwtosi65.default")
        # # self.driver = selenium.webdriver.Firefox(fp)  # launch Firefox with a preloaded profile (plugins)
        # self.driver = selenium.webdriver.Firefox()  # launch Firefox without plugins
        # self.base_url = "http://192.168.1.249:8484/hk-management-services/index.html"  # test environment
        # Staging environment:
        # self.base_url="http://39.105.87.46:8501/hk-management-services/login.html?_v=20180525targetUrlAfterLogin=http://39.105.87.46:8501/hk-management-services/index.html"
        self.base_url = hturl
        self.driver.get(self.base_url)
Example #37
class Email:
    def __init__(self, path=None):
        """初始化
        :param title: 邮件标题,必填。
        :param message: 邮件正文,非必填。
        :param path: 附件路径,可传入list(多附件)或str(单个附件),非必填。
        :param server: smtp服务器,必填。
        :param sender: 发件人,必填。
        :param password: 发件人密码,必填。
        :param receiver: 收件人,多收件人用“;”隔开,必填。
        """

        self.title = Config().get("EMAIL").get('title')
        self.message = Config().get("EMAIL").get('message')
        self.files = path

        self.msg = MIMEMultipart('related')

        self.server = Config().get("EMAIL").get('server')
        self.sender = Config().get("EMAIL").get('sender')
        self.receiver = Config().get("EMAIL").get('receiver')
        self.password = Config().get("EMAIL").get('password')

    def _attach_file(self, att_file):
        """将单个文件添加到附件列表中"""
        att = MIMEText(open('%s' % att_file, 'rb').read(), 'plain', 'utf-8')
        att["Content-Type"] = 'application/octet-stream'
        file_name = re.split(r'[\\|/]', att_file)
        att["Content-Disposition"] = 'attachment; filename="%s"' % file_name[-1]
        self.msg.attach(att)
        logger.info('attach file {}'.format(att_file))

    def send(self):
        self.msg['Subject'] = self.title
        self.msg['From'] = self.sender
        self.msg['To'] = self.receiver

        # Message body
        if self.message:
            self.msg.attach(MIMEText(self.message))

        # Attach files: multiple attachments (list) or a single attachment (str)
        if self.files:
            if isinstance(self.files, list):
                for f in self.files:
                    self._attach_file(f)  # attach each file
            elif isinstance(self.files, str):
                self._attach_file(self.files)

        # Connect to the server and send
        try:
            smtp_server = smtplib.SMTP(self.server)  # connect to the server
        except (gaierror, error) as e:
            logger.exception('Failed to send mail: could not connect to the '
                             'SMTP server. Check the network and the SMTP server. %s', e)
        else:
            try:
                smtp_server.login(self.sender, self.password)  # log in
            except smtplib.SMTPAuthenticationError as e:
                logger.exception('Username/password authentication failed! %s', e)
            else:
                smtp_server.sendmail(self.sender, self.receiver.split(';'),
                                     self.msg.as_string())  # send the mail
                logger.info('Mail "{0}" sent successfully! Recipient(s): {1}. '
                            'If it does not arrive, check the spam folder and '
                            'verify the recipient address.'.format(self.title, self.receiver))
            finally:
                smtp_server.quit()  # disconnect
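
A short usage sketch for the class above (hypothetical attachment paths; all SMTP settings come from the config's EMAIL section, and the module-level logger must already be configured):

# Hypothetical usage sketch.
mail = Email(path=['report.html', 'run.log'])  # a list, a single str, or None
mail.send()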
Example #38
from utils.roi_helpers import calc_iou
from utils.utils import BBoxUtility


def write_log(callback, names, logs, batch_no):
    for name, value in zip(names, logs):
        summary = tf.Summary()
        summary_value = summary.value.add()
        summary_value.simple_value = value
        summary_value.tag = name
        callback.writer.add_summary(summary, batch_no)
        callback.writer.flush()


if __name__ == "__main__":
    config = Config()
    NUM_CLASSES = 21  # 20 VOC classes + background
    EPOCH = 100
    EPOCH_LENGTH = 2000
    bbox_util = BBoxUtility(
        overlap_threshold=config.rpn_max_overlap,
        ignore_threshold=config.rpn_min_overlap)  # helper object for processing boxes
    annotation_path = '2007_train.txt'

    model_rpn, model_classifier, model_all = get_model(config, NUM_CLASSES)
    base_net_weights = "model_data/voc_weights.h5"

    model_all.summary()
    model_rpn.load_weights(base_net_weights, by_name=True)
    model_classifier.load_weights(base_net_weights, by_name=True)
Example #39
# -*- coding: utf-8 -*-
from .base import *
from utils.config import Config
import os
import re

MYSQL_CONFIG = Config.get('config', 'mysql')

db_host = MYSQL_CONFIG.get('default').get('host')
db_port = MYSQL_CONFIG.get('default').get('port')
db_name = MYSQL_CONFIG.get('default').get('db')
db_user = MYSQL_CONFIG.get('default').get('user')
db_password = MYSQL_CONFIG.get('default').get('password')

pattern = re.compile('^%.*%$')

if pattern.match(str(db_host)):
    db_host = os.environ.get(db_host.strip('%'), None)

if pattern.match(str(db_port)):
    db_port = os.environ.get(db_port.strip('%'), None)

if pattern.match(str(db_name)):
    db_name = os.environ.get(db_name.strip('%'), None)

if pattern.match(str(db_user)):
    db_user = os.environ.get(db_user.strip('%'), None)

if pattern.match(str(db_password)):
    db_password = os.environ.get(db_password.strip('%'), None)
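
The five repeated blocks above all apply one rule: a value wrapped in percent signs names an environment variable. A small helper capturing that rule (a sketch reusing the module's pattern, not part of the original settings file):

def resolve_placeholder(value):
    """Return os.environ['VAR'] for a '%VAR%' placeholder (None if unset);
    return any other value unchanged."""
    if value is not None and pattern.match(str(value)):
        return os.environ.get(str(value).strip('%'), None)
    return value

# e.g. db_host = resolve_placeholder(db_host), and likewise for the other four.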
Example #40
def test():
    parser = argparse.ArgumentParser(description="")

    parser.add_argument(
        'config_path',
        type=str,
        help="path to the config file",
    )
    parser.add_argument(
        '--dataset_size',
        type=str,
        help="config override",
    )

    args = parser.parse_args()

    config = Config.from_file(args.config_path)

    if args.dataset_size is not None:
        config.override(
            'prooftrace_dataset_size',
            args.dataset_size,
        )

    # sequence_size = config.get('prooftrace_sequence_length')
    # action_size = len(PROOFTRACE_TOKENS) - len(PREPARE_TOKENS)

    # prd_actions = torch.rand(action_size)
    # prd_lefts = torch.rand(sequence_size)
    # prd_rights = torch.rand(sequence_size)

    # prd_actions = torch.log(prd_actions / prd_actions.sum())
    # prd_lefts = torch.log(prd_lefts / prd_lefts.sum())
    # prd_rights = torch.log(prd_rights / prd_rights.sum())

    # env = Env(config, False)
    # env.reset()
    # env.explore(
    #     prd_actions,
    #     prd_lefts,
    #     prd_rights,
    #     1.0,
    #     1.0,
    #     3,
    # )
    # print(".")

    pool = Pool(config, False)
    pool.reset(1.0)

    observations, rewards, dones, infos = pool.step(
        [[8, 12, 13]] * config.get('prooftrace_env_pool_size'),
        1.0,
        1.0,
        3,
    )
    for i in range(config.get('prooftrace_env_pool_size')):
        Log.out("STEP", {
            'index': i,
            'reward': rewards[i],
            'done': dones[i],
        })
Example #41
def setUp(self):
    self.conf = Config()
Example #42
def setUp(self):
    c = Config()
    ip = c.get('ip')
    port = c.get('port')
    self.client = TCPClient(ip, port)
Example #43
def __init__(self):
    self.hook = os.getenv('BC_HOOK')

    if not self.hook:
        c = Config()
        self.hook = c.data('bearychat', 'hook')
Example #44
def __init__(self):
    self._materials = {}
    self.config = Config.Instance()
Example #45
def test_caller(path, step_ind, on_val, dataset_path, noise, calc_tsne):
    ##########################
    # Initiate the environment
    ##########################

    # Choose which gpu to use
    GPU_ID = '0'

    # Set GPU visible device
    os.environ['CUDA_VISIBLE_DEVICES'] = GPU_ID

    # Show all TF log messages ('3' would silence warnings)
    os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'

    ###########################
    # Load the model parameters
    ###########################

    # Load model parameters
    config = Config()
    config.load(path)

    ##################################
    # Change model parameters for test
    ##################################

    # Change parameters for the test here. For example, you can stop augmenting the input data.

    # config.augment_noise = 0.0001
    # config.augment_color = 1.0

    # Adjust batch num if only a single model is to be completed
    if on_val:
        val_data_paths = sorted([
            os.path.join(dataset_path, 'val', 'partial',
                         k.rstrip() + '.h5')
            for k in open(os.path.join(dataset_path, 'val.list')).readlines()
        ])
        if len(val_data_paths) == 1:
            config.validation_size = 1
            config.batch_num = 1
    else:
        test_data_paths = sorted([
            os.path.join(dataset_path, 'test', 'partial',
                         k.rstrip() + '.h5')
            for k in open(os.path.join(dataset_path, 'test.list')).readlines()
        ])
        if len(test_data_paths) == 1:
            config.validation_size = 1
            config.batch_num = 1

    # Augmentations
    config.augment_scale_anisotropic = True
    config.augment_symmetries = [False, False, False]
    config.augment_rotation = 'none'
    config.augment_scale_min = 1.0
    config.augment_scale_max = 1.0
    config.augment_noise = noise
    config.augment_occlusion = 'none'

    ##############
    # Prepare Data
    ##############

    print()
    print('Dataset Preparation')
    print('*******************')

    # Initiate dataset configuration
    dl0 = 0  # config.first_subsampling_dl
    if config.dataset.startswith('ShapeNetV1'):
        dataset = ShapeNetV1Dataset()
        # Create subsample clouds of the models
        dataset.load_subsampled_clouds(dl0)
    elif config.dataset.startswith("pc_shapenetCompletionBenchmark2048"):
        dataset = ShapeNetBenchmark2048Dataset(config.batch_num,
                                               config.num_input_points,
                                               dataset_path)
        # Create subsample clouds of the models
        dataset.load_subsampled_clouds(
            dl0)  # TODO: careful dl0 is used here - padding?
    else:
        raise ValueError('Unsupported dataset : ' + config.dataset)

    # Initialize input pipelines
    if on_val:
        dataset.init_input_pipeline(config)
    else:
        dataset.init_test_input_pipeline(config)

    ##############
    # Define Model
    ##############

    print('Creating Model')
    print('**************\n')
    t1 = time.time()

    if config.dataset.startswith('ShapeNetV1') or config.dataset.startswith(
            "pc_shapenetCompletionBenchmark2048"):
        model = KernelPointCompletionNetwork(dataset.flat_inputs, config,
                                             args.double_fold)
    else:
        raise ValueError('Unsupported dataset : ' + config.dataset)

    # Find all snapshot in the chosen training folder
    snap_path = os.path.join(path, 'snapshots')
    snap_steps = [
        int(f[:-5].split('-')[-1]) for f in os.listdir(snap_path)
        if f[-5:] == '.meta'
    ]

    # Find which snapshot to restore
    if step_ind == -1:
        chosen_step = np.sort(snap_steps)[step_ind]
    else:
        chosen_step = step_ind + 1
    chosen_snap = os.path.join(path, 'snapshots',
                               'snap-{:d}'.format(chosen_step))

    # Create a tester class
    tester = ModelTester(model, restore_snap=chosen_snap)
    t2 = time.time()

    print('\n----------------')
    print('Done in {:.1f} s'.format(t2 - t1))
    print('----------------\n')

    ############
    # Start test
    ############

    print('Start Test')
    print('**********\n')

    if config.dataset.startswith('ShapeNetV1') or config.dataset.startswith(
            "pc_shapenetCompletionBenchmark2048"):
        tester.test_completion(model, dataset, on_val, calc_tsne)
    else:
        raise ValueError('Unsupported dataset')
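
A hypothetical invocation of test_caller (the log path and arguments are illustrative only; note that the function also reads args.double_fold from the enclosing script):

# Restore the newest snapshot of a trained log and evaluate on the
# validation split, with no added noise and no t-SNE computation.
test_caller(path='results/Log_2020-01-01_12-00-00',
            step_ind=-1,
            on_val=True,
            dataset_path='Data/ShapeNetBenchmark2048',
            noise=0.0,
            calc_tsne=False)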
Example #46
def get_availability_zones():
    config = Config()
    ec2 = boto.ec2.connect_to_region(config.get("region"))
    return ec2.get_all_zones()
Example #47
    # Automatically retrieve the last trained model
    if chosen_log in handled_logs:

        # Dataset name
        test_dataset = '_'.join(chosen_log.split('_')[1:])

        # List all training logs
        logs = np.sort([
            os.path.join('results', f) for f in os.listdir('results')
            if f.startswith('Log')
        ])

        # Find the last log of asked dataset
        for log in logs[::-1]:
            log_config = Config()
            log_config.load(log)
            if log_config.dataset.startswith(test_dataset):
                chosen_log = log
                break

        if chosen_log in handled_logs:
            raise ValueError('No log of the dataset "' + test_dataset +
                             '" found')

    # Check if log exists
    if not os.path.exists(chosen_log):
        raise ValueError('The given log does not exist: ' + chosen_log)

    # Let's go
    test_caller(chosen_log, chosen_snapshot, args.on_val, args.dataset_path,
Example #48
import socket
from utils.config import Config

TEST_HOST1 = '10.40.2.62'
TEST_HOST2 = '10.40.2.109'
c = Config()


def get_remote_ip(url):
    return socket.gethostbyname(url)


def get_account(url):
    ip = get_remote_ip(url)
    account = ''
    if ip == TEST_HOST1:
        account = c.get("account")['test1']
    elif ip == TEST_HOST2:
        account = c.get('account')['test2']
    else:
        print("invalid url")
    return account


def get_pay_method():
    yield from c.get('payMapping')


if __name__ == '__main__':
    rip = get_remote_ip('user.hqygou.com')
    # t1 = c.get('account')['test1']
Example #49
        "title": newfilename, 
        "bilayer_ie_c33_table": df.to_html()}

    html_out = template.render(template_vars)

    # generate pdf from rendered html 

    from weasyprint import HTML, CSS
    report_style = CSS(filename=report_style_filename)
    # output to pdf
    HTML(string=html_out).write_pdf(
        report_pdf_filename,
        stylesheets=[report_style])
#%%

c = Config()
df1_columns_index = [15, 2, 3, 5, 7, 8, 9, 10, 11, 12]
df1_columns_labels = ['uid', 'bilayer', 'monolayer1', 'monolayer2', 'IE', 'IE_error', 'IE_rel_error', 'C33', 'C33_error', 'C33_rel_err']

#%%

import time
chunk_size = 5000
for i, df_chunk in enumerate(pd.read_csv(c.uid_300K, chunksize=chunk_size)):
    # re-order and label columns
    df_chunk = df_chunk.iloc[:, df1_columns_index]
    df_chunk.columns = df1_columns_labels
    range_str = f"{i*chunk_size} to {(i*chunk_size)+df_chunk.shape[0]-1}"
    print(range_str)
Example #50
def __init__(self, cfg):
    self.config = Config.from_json(cfg)  # from module
Example #52
import torch.optim as optim

from models.frustum_net import FrustumPointDanFull
from utils.config import Config, configs

# model
configs.model = Config(FrustumPointDanFull)
configs.model.num_classes = configs.data.num_classes
configs.model.num_heading_angle_bins = configs.data.num_heading_angle_bins
configs.model.num_size_templates = configs.data.num_size_templates
configs.model.num_points_per_object = configs.data.num_points_per_object
configs.model.size_templates = configs.data.size_templates
configs.model.extra_feature_channels = 1

# train: scheduler
configs.train.scheduler_g = Config(optim.lr_scheduler.CosineAnnealingLR)
configs.train.scheduler_g.T_max = configs.train.num_epochs
configs.train.scheduler_c = Config(optim.lr_scheduler.CosineAnnealingLR)
configs.train.scheduler_c.T_max = configs.train.num_epochs
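
Config here evidently wraps a callable and collects subsequent attribute assignments as keyword arguments for it. A minimal sketch of that pattern (an assumption about how utils.config might work, not its actual source):

class CallableConfig(object):
    """Sketch: a config node wrapping a callable; attributes assigned to
    the node become keyword arguments when the node is called."""

    def __init__(self, func=None):
        self._func = func
        self._kwargs = {}

    def __setattr__(self, name, value):
        # Private names go on the instance; everything else is a kwarg.
        if name.startswith('_'):
            object.__setattr__(self, name, value)
        else:
            self._kwargs[name] = value

    def __getattr__(self, name):
        if name.startswith('_'):
            raise AttributeError(name)
        try:
            return self._kwargs[name]
        except KeyError:
            raise AttributeError(name)

    def __call__(self, *args, **kwargs):
        merged = dict(self._kwargs)
        merged.update(kwargs)
        return self._func(*args, **merged)

# e.g. sched_cfg = CallableConfig(optim.lr_scheduler.CosineAnnealingLR)
#      sched_cfg.T_max = 100
#      scheduler = sched_cfg(optimizer)  # CosineAnnealingLR(optimizer, T_max=100)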
Example #53
def connect(ip, port, username, password):
    program = '"' + Config().sslvpn_folder + '/FortiSSLVPNclient.exe" connect -m -i -q -h ' + ip + ':' \
              + port + ' -u ' + username + ':' + password
    subprocess.Popen(program)
    logging.info('sslvpn: connection established')
    time.sleep(7)
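
Both connect() above and disconnect() in Example #33 build the command by string concatenation, which is why the executable path needs hand-placed quotes. Passing an argument list sidesteps that (a sketch assuming the same FortiSSLVPNclient.exe command-line interface):

def connect_args(ip, port, username, password):
    """Sketch: same invocation as connect(), but with an argument list,
    so a space in sslvpn_folder needs no manual quoting."""
    exe = Config().sslvpn_folder + '/FortiSSLVPNclient.exe'
    subprocess.Popen([exe, 'connect', '-m', '-i', '-q',
                      '-h', '%s:%s' % (ip, port),
                      '-u', '%s:%s' % (username, password)])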
Example #55
def main(dataset_name, net_name, load_config, load_model, objective, nu,
         device, seed, optimizer_name, lr, n_epochs, lr_milestone, batch_size,
         weight_decay, pretrain, ae_optimizer_name, ae_lr, ae_n_epochs,
         ae_lr_milestone, ae_batch_size, ae_weight_decay, n_jobs_dataloader,
         ratio, run_times):
    """
    Deep SVDD, a fully deep method for anomaly detection.

    :arg DATASET_NAME: Name of the dataset to load.
    :arg NET_NAME: Name of the neural network to use.
    :arg XP_PATH: Export path for logging the experiment.
    :arg DATA_PATH: Root path of data.
    """
    class_num = 10
    if dataset_name == 'cifar100':
        class_num = 20
    for run_index in range(run_times):
        #for ratio in [0.05, 0.1, 0.15, 0.2, 0.25]:
        for i in range(class_num):
            normal_class = i
            class_name = get_class_name_from_index(normal_class, dataset_name)
            os.makedirs(os.path.join(RESULTS_DIR), exist_ok=True)
            # Get configuration
            cfg = Config(locals().copy())

            # Set up logging
            logging.basicConfig(level=logging.INFO)
            logger = logging.getLogger()
            logger.setLevel(logging.INFO)
            formatter = logging.Formatter(
                '%(asctime)s - %(name)s - %(levelname)s - %(message)s')

            xp_path = RESULTS_DIR

            log_file = xp_path + '/log.txt'
            file_handler = logging.FileHandler(log_file)
            file_handler.setLevel(logging.INFO)
            file_handler.setFormatter(formatter)
            logger.addHandler(file_handler)

            # Print arguments
            logger.info('Log file is %s.' % log_file)
            logger.info('Export path is %s.' % xp_path)

            logger.info('Dataset: %s' % dataset_name)
            logger.info('Normal class: %d' % normal_class)
            logger.info('Network: %s' % net_name)

            # If specified, load experiment config from JSON-file
            if load_config:
                cfg.load_config(import_json=load_config)
                logger.info('Loaded configuration from %s.' % load_config)

            # Print configuration
            logger.info('Deep SVDD objective: %s' % cfg.settings['objective'])
            logger.info('Nu-parameter: %.2f' % cfg.settings['nu'])

            # Set seed
            cfg.settings['seed'] = run_index
            if cfg.settings['seed'] != -1:
                random.seed(cfg.settings['seed'])
                np.random.seed(cfg.settings['seed'])
                torch.manual_seed(cfg.settings['seed'])
                logger.info('Set seed to %d.' % cfg.settings['seed'])

            # Default device to 'cpu' if cuda is not available
            if not torch.cuda.is_available():
                device = 'cpu'
            logger.info('Computation device: %s' % device)
            logger.info('Number of dataloader workers: %d' % n_jobs_dataloader)

            # Load data
            dataset = load_dataset(dataset_name, normal_class, ratio)

            # Initialize DeepSVDD model and set neural network \phi
            deep_SVDD = DeepSVDD(cfg.settings['objective'], cfg.settings['nu'])
            deep_SVDD.set_network(net_name)
            # If specified, load Deep SVDD model (radius R, center c, network weights, and possibly autoencoder weights)
            if load_model:
                deep_SVDD.load_model(model_path=load_model, load_ae=True)
                logger.info('Loading model from %s.' % load_model)

            logger.info('Pretraining: %s' % pretrain)
            if pretrain:
                # Log pretraining details
                logger.info('Pretraining optimizer: %s' %
                            cfg.settings['ae_optimizer_name'])
                logger.info('Pretraining learning rate: %g' %
                            cfg.settings['ae_lr'])
                logger.info('Pretraining epochs: %d' %
                            cfg.settings['ae_n_epochs'])
                logger.info(
                    'Pretraining learning rate scheduler milestones: %s' %
                    (cfg.settings['ae_lr_milestone'], ))
                logger.info('Pretraining batch size: %d' %
                            cfg.settings['ae_batch_size'])
                logger.info('Pretraining weight decay: %g' %
                            cfg.settings['ae_weight_decay'])

                # Pretrain model on dataset (via autoencoder)
                deep_SVDD.pretrain(
                    dataset,
                    optimizer_name=cfg.settings['ae_optimizer_name'],
                    lr=cfg.settings['ae_lr'],
                    n_epochs=cfg.settings['ae_n_epochs'],
                    lr_milestones=cfg.settings['ae_lr_milestone'],
                    batch_size=cfg.settings['ae_batch_size'],
                    weight_decay=cfg.settings['ae_weight_decay'],
                    device=device,
                    n_jobs_dataloader=n_jobs_dataloader)

            # Log training details
            logger.info('Training optimizer: %s' %
                        cfg.settings['optimizer_name'])
            logger.info('Training learning rate: %g' % cfg.settings['lr'])
            logger.info('Training epochs: %d' % cfg.settings['n_epochs'])
            logger.info('Training learning rate scheduler milestones: %s' %
                        (cfg.settings['lr_milestone'], ))
            logger.info('Training batch size: %d' % cfg.settings['batch_size'])
            logger.info('Training weight decay: %g' %
                        cfg.settings['weight_decay'])

            # Train model on dataset
            deep_SVDD.train(dataset,
                            optimizer_name=cfg.settings['optimizer_name'],
                            lr=cfg.settings['lr'],
                            n_epochs=cfg.settings['n_epochs'],
                            lr_milestones=cfg.settings['lr_milestone'],
                            batch_size=cfg.settings['batch_size'],
                            weight_decay=cfg.settings['weight_decay'],
                            device=device,
                            n_jobs_dataloader=n_jobs_dataloader)

            # Test model
            scores, labels = deep_SVDD.test(
                dataset, device=device, n_jobs_dataloader=n_jobs_dataloader)

            res_file_name = '{}_dsvdd-{}_{}_{}.npz'.format(
                dataset_name, ratio, class_name,
                datetime.now().strftime('%Y-%m-%d-%H%M'))
            res_file_path = os.path.join(RESULTS_DIR, dataset_name,
                                         res_file_name)
            os.makedirs(os.path.join(RESULTS_DIR, dataset_name), exist_ok=True)
            save_roc_pr_curve_data(scores, labels, res_file_path)

            # Plot most anomalous and most normal (within-class) test samples
            # indices, labels, scores = zip(*deep_SVDD.results['test_scores'])
            # indices, labels, scores = np.array(indices), np.array(labels), np.array(scores)
            # idx_sorted = indices[labels == 0][np.argsort(scores[labels == 0])]  # sorted from lowest to highest anomaly score
            #
            # if dataset_name in ('mnist', 'cifar10'):
            #
            #     if dataset_name == 'mnist':
            #         X_normals = dataset.test_set.test_data[idx_sorted[:32], ...].unsqueeze(1)
            #         X_outliers = dataset.test_set.test_data[idx_sorted[-32:], ...].unsqueeze(1)
            #
            #     if dataset_name == 'cifar10':
            #         X_normals = torch.tensor(np.transpose(dataset.test_set.test_data[idx_sorted[:32], ...], (0, 3, 1, 2)))
            #         X_outliers = torch.tensor(np.transpose(dataset.test_set.test_data[idx_sorted[-32:], ...], (0, 3, 1, 2)))
            #
            #     plot_images_grid(X_normals, export_img=xp_path + '/normals', title='Most normal examples', padding=2)
            #     plot_images_grid(X_outliers, export_img=xp_path + '/outliers', title='Most anomalous examples', padding=2)

            # Save results, model, and configuration
            logger.info('finish class {} training.'.format(class_name))
    logger.info('send exp finish mail.')
    send_mailgun()
Example #56
from utils.thread import ThreadPool
from utils.common import Task, repair_filename, touch_dir, size_format
from utils.playlist import Dpl
from utils.downloader import FileManager

spider = Crawler()
VIDEO, PDF, RICH_TEXT = 1, 3, 4

headers = {
    'User-Agent':
    'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/64.0.3282.167 Safari/537.36',
    # 'edu-app-version': '3.17.1',
}
srt_types = ["zh-cn", "en"]
spider.headers.update(headers)
CONFIG = Config()


def login(username, password):
    """ 登录获取 token """
    pd = hashlib.md5()
    pd.update(password.encode('utf-8'))
    passwd = pd.hexdigest()
    headers = {'edu-app-type': 'android', 'edu-app-version': '2.6.1'}
    data = {'username': username, 'passwd': passwd, 'mob-token': ''}
    res = spider.post('http://www.icourse163.org/mob/logonByIcourse',
                      headers=headers,
                      data=data)
    result = res.json()
    code = result.get("status").get("code")
    if code == 0:
Example #57
def load_dot_buildc_rc(file_path):
    c = Config.load_config(file_path)
    return c
Example #58
    #     time.sleep(2)
    #     links = self.dr.find_elements(*self.locator_result)
    #     for link in links:
    #         # print(link.text)
    #         logger.info(link.text)


# if __name__ == '__main__':
#     test_suite = unittest.defaultTestLoader.discover(TEST_PATH, pattern='test*.py')
#     result = BeautifulReport(test_suite)
#     result.report(filename='test_report', description='default test report', log_path=REPORT_PATH)

if __name__ == '__main__':
    report = REPORT_PATH + '\\report.html'
    with open(report, 'wb') as f:
        runner = HTMLTestRunner(f,
                                verbosity=2,
                                title='Test Report',
                                description='test report')
        runner.run(TestBaiDu('test_search_0'))

    c = Config().get('mail')
    e = Email(title=c.get('title'),
              message=c.get('message'),
              receiver=c.get('receiver'),
              server=c.get('server'),
              sender=c.get('sender'),
              password=c.get('password'),
              path=report)
    e.send()
Example #59
def load_setup_cfg(file_path):
    c = Config.load_config(file_path)
    return c
Example #60
class Test_Load(unittest.TestCase):

    config = Config()
    url = config.get('local_url')  # cluster test
    url = config.get('api_url')  # Huawei API gateway test
    print('Test URL:', url)
    images = config.get('images')  # test images

    num = int(config.get('num'))
    print('Concurrency level:', num)
    total_time = 0  # total elapsed time
    total_payload = 0  # total payload
    total_num = 0  # total number of requests
    all_time = []

    # Set the target URL and request method
    def setUp(self):
        self.client = Asyncio_Client()
        print('setting up')

    # Create an asynchronous task
    async def task_func(self):
        data = {'image_id': 2}
        begin = time.time()
        print('start sending:', begin)
        files = {
            'image': open(self.image, 'rb')
        }  # path is relative to the launch directory (server.py's directory here)
        r = requests.post(self.url, data=data, files=files)
        print(r.text)
        end = time.time()
        self.total_time += end - begin
        print('finished receiving:', end)

    # Create an async task; in a local test, posting and receiving cost almost
    # nothing and can be awaited, so the algorithm module dominates the elapsed time
    async def task_func1(self, session):

        begin = time.time()
        # print('start sending:', begin)
        file = open(self.image, 'rb')
        fsize = os.path.getsize(self.image)
        self.total_payload += fsize / (1024 * 1024)

        data = {"image_id": "2", 'image': file}
        r = await session.post(self.url, data=data)  # post the request
        result = await r.json()  # await the JSON response
        self.total_num += 1
        # print(result)
        end = time.time()
        # print('finished receiving:', end, ', index=', self.total_num)
        self.all_time.append(end - begin)

    # Load test
    def test_safety(self):

        print('test begin')
        async_client = Asyncio_Client()  # create the client
        session = aiohttp.ClientSession()
        for i in range(1):  # number of rounds
            self.all_time = []
            self.total_num = 0
            self.total_payload = 0
            self.image = DATA_PATH + "/" + self.images[0]  # pick the test image
            print('Test image:', self.image)
            begin = time.time()
            async_client.set_task(self.task_func1, self.num, session)  # queue the concurrent tasks
            async_client.run()  # run them

            end = time.time()
            self.all_time.sort(reverse=True)
            print(self.all_time)
            print('Number of requests:', self.total_num)
            print('Total time (s):', end - begin)
            print('Max latency (s):', self.all_time[0])
            print('Min latency (s):', self.all_time[len(self.all_time) - 1])
            print('Top-90% latency (s):',
                  self.all_time[int(len(self.all_time) * 0.1)])
            print('Average time per request (s):', sum(self.all_time) / self.total_num)
            print('Sustained request rate (req/s):', self.total_num / (end - begin))
            print('Total payload (MB):', self.total_payload)
            # Throughput depends on uplink/downlink bandwidth, server bandwidth,
            # and server-side algorithm performance
            print('Throughput (MB/s):', self.total_payload / (end - begin))

            time.sleep(3)

        session.close()
        print('test finish')
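
The top-90% figure above relies on the descending sort: index int(n * 0.1) skips the slowest 10% of requests. A standalone helper with the same convention (a sketch, not part of the original test class):

def latency_summary(samples):
    """Sketch: summarize per-request latencies (seconds) the way
    test_safety reports them."""
    ordered = sorted(samples, reverse=True)
    return {
        'max': ordered[0],
        'min': ordered[-1],
        'top90': ordered[int(len(ordered) * 0.1)],  # slowest 10% skipped
        'mean': sum(ordered) / len(ordered),
    }

# e.g. latency_summary([0.12, 0.40, 0.33, 0.25])
# -> {'max': 0.4, 'min': 0.12, 'top90': 0.4, 'mean': 0.275}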