示例#1
0
    def __init__(self, args):
        """Windows service constructor: create the stop event, load
        configuration and build the agent sub-processes.

        args -- service arguments forwarded to ServiceFramework.
        """
        win32serviceutil.ServiceFramework.__init__(self, args)
        # Event signalled by the service manager to request shutdown.
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
        config = get_config(parse_args=False)

        # Setup the correct options so the agent will use the forwarder
        opts, args = Values({
            'dd_url': None,
            'clean': False,
            'use_forwarder': True,
            'disabled_dd': False
        }), []
        agentConfig = get_config(parse_args=False, options=opts)
        self.restart_interval = \
            int(agentConfig.get('autorestart_interval', RESTART_INTERVAL))
        # Fixed typo in the log message ("ever" -> "every").
        log.info("Autorestarting the collector every %s seconds" % self.restart_interval)

        # Keep a list of running processes so we can start/end as needed.
        # Processes will be started in order and stopped in reverse order.
        self.procs = {
            'forwarder': DDForwarder(config),
            'collector': DDAgent(agentConfig),
            'dogstatsd': DogstatsdProcess(config),
            'pup':       PupProcess(config),
        }
示例#2
0
def create_task(screen_name, consumer_key, consumer_secret, user_key, user_secret):
    """Launch an ECS task for a Twitter user's Neo4j instance.

    Runs the task, resolves its host IP and port, waits for Neo4j to accept
    connections, and returns a dict with the instance 'url' and 'password'.
    Re-raises any failure after logging it.
    """
    ecs = boto3.client('ecs', region_name=conf.get_config('AWS_REGION_NAME'))
    ec2 = boto3.client('ec2', region_name=conf.get_config('AWS_REGION_NAME'))

    try:
      rw = RandomWords()
      word = rw.random_words(count=3)
      # NOTE(review): '******' looks like a redacted format string (originally
      # something like three %s placeholders joined by a separator). As written,
      # '%' with no conversion specifiers raises TypeError -- restore the
      # original template before use.
      password = '******' % (word[0], word[1], word[2])

      tn_logger.debug('Calling run_task')
      task_arn = run_task(ecs, screen_name, consumer_key, consumer_secret, user_key, user_secret, password)
      tn_logger.debug('Done calling run_task')
      task_info = get_task_info(ecs, task_arn)
      ip_address = get_connection_ip(ec2, task_info['instanceId'])
      # Blocks until the Neo4j endpoint is reachable (presumably with retries
      # inside the helper -- confirm).
      try_connecting_neo4j(ip_address, task_info['port'])
      tn_logger.info('Created instance for tw:%s at %s:%s' % (screen_name,ip_address,task_info['port']))
    except Exception as e:
      tn_logger.exception(e)
      tn_logger.error('Error creating docker image for: tu:%s' % screen_name)
      print(traceback.format_exc())
      print(e)
      raise e

    response_dict = { 
      'url': 'http://%s:%s' % (ip_address, task_info['port']),
      'password': password }
 
    return response_dict
示例#3
0
def slice_spritesheet(filename, x_size, y_size):
    """Cut a spritesheet image into x_size-by-y_size tiles.

    Each tile is saved as a sequentially numbered PNG in the configured
    processed directory, continuing from the persisted file index (which is
    advanced afterwards).  Returns the list of tile file names.
    """
    sheet = Image.open(filename)
    sheet.load()
    out_dir = get_config("processed", "directory")

    # How many whole tiles fit horizontally / vertically.
    columns = int(sheet.size[0] / x_size)
    rows = int(sheet.size[1] / y_size)

    written = []
    index = int(get_config("last_file_index", "image"))
    for row_idx in range(rows):
        for col_idx in range(columns):
            left = x_size * col_idx
            top = y_size * row_idx
            crop_box = (left, top, left + x_size, top + y_size)

            tile_image = sheet.crop(crop_box)
            index += 1
            tile_name = str(index) + ".png"
            tile_image.save(path.join(out_dir, tile_name))
            written.append(tile_name)
    set_last_file_index(index)

    return written
示例#4
0
def modify_req(request, apikey):
    """Modify a project's settings (category, stage, platform, name, timezone).

    Only the project owner may modify the project.  Returns a JSON
    HttpResponse describing success or the reason for refusal.
    """
    username = request.user

    valid, message, userelement, projectelement = validUserPjt(username, apikey)

    if not valid:
        # Fixed: json.dupms -> json.dumps (AttributeError at runtime).
        return HttpResponse(json.dumps({"success": False, "message": "wrong access"}), "application/json")

    # Only the owner is allowed to modify the project.
    if projectelement.owner_uid.id != userelement.id:
        return HttpResponse(json.dumps({"success": False, "message": "Only the owner"}), "application/json")

    # Lookup tables mapping display names to stored codes.
    stagedata = json.loads(get_config("app_stages"))
    categorydata = json.loads(get_config("app_categories"))
    platformdata = json.loads(get_config("app_platforms"))

    projectelement.category = categorydata[request.POST["category"]]
    projectelement.stage = stagedata[request.POST["stage"]]
    projectelement.platform = platformdata[request.POST["platform"]]
    projectelement.name = request.POST["projectname"]
    projectelement.timezone = request.POST["timezone"]

    # Persist the modified project.
    projectelement.save()
    return HttpResponse(json.dumps({"success": True, "message": "success modify project"}), "application/json")
示例#5
0
    def __init__(self, **kwargs):
        """Initialize the TPAM SSH Client with a paramiko SSH Client.

        Keyword arguments override settings read from ~/.tccfg or <cwd>.tccfg.

        Raises:
            error.TPAMConfigError: if the merged configuration is invalid
                (propagated from config.validate).
            error.TPAMConnectError: if the SSH connection cannot be made.
        """
        self.conn = paramiko.SSHClient()
        self.conn.set_missing_host_key_policy(paramiko.AutoAddPolicy())

        # Read client configuration from ~/.tccfg or <cwd>.tccfg
        cfg_opts = {}
        config.get_config(cfg_opts)

        # Override .tccfg settings with anything passed in as keyword args
        if "tpam_key" in kwargs:
            kwargs["tpam_key"] = config.expand_user_path(kwargs["tpam_key"])
        cfg_opts.update(kwargs)

        # Validate config; TPAMConfigError propagates naturally.  (The old
        # try/except here only re-raised the same exception -- a no-op.)
        config.validate(cfg_opts)

        # Establish SSH connection.
        try:
            self.conn.connect(cfg_opts["tpam_host"],
                username=cfg_opts["tpam_user"],
                key_filename=cfg_opts["tpam_key"]
            )
        except (paramiko.SSHException, socket.error) as se:
            raise error.TPAMConnectError(se)

        # Dynamically add API methods to class
        # TODO: API version? - should ask Dell to include a version API call
        for method_name in api_2_5_11:
            self._generate(method_name)
示例#6
0
def intercom_companies():
    """Fetch all verified companies from Intercom and return them as a
    pandas DataFrame with normalized column names and date-typed
    timestamp columns.
    """
    Intercom.app_id = get_config('INTERCOM_APP_ID')
    Intercom.app_api_key = get_config('INTERCOM_API_KEY')

    # Keep only companies whose 'Verified' custom attribute is truthy.
    verified = [FlatDict(c.to_dict) for c in Company.all()
                if 'Verified' in c.custom_attributes and
                c.custom_attributes['Verified']]

    # Normalize flattened keys: lowercase, last ':'-segment, spaces -> '_'.
    rows = []
    for flat in verified:
        rows.append({key.lower().split(':')[-1].replace(' ', '_'): value
                     for key, value in flat.iteritems()})

    frame = pd.DataFrame(rows)

    unwanted = ['0', '1', 'id', 'widget_integrated', 'app_id',
                'automatic_confirm_bookings', 'minimum_book_in_advance_hours',
                'phone_number', 'monthly_spend']
    frame = frame.T.drop(unwanted).T

    # Convert epoch-second columns to datetime.date.
    frame.last_request_at = frame.last_request_at.apply(
        lambda ts: date.fromtimestamp(ts))
    frame.created_at = frame.created_at.apply(
        lambda ts: date.fromtimestamp(ts))
    frame.remote_created_at = frame.remote_created_at.apply(
        lambda ts: date.fromtimestamp(ts))
    frame.updated_at = frame.updated_at.apply(
        lambda ts: date.fromtimestamp(ts))
    return frame
示例#7
0
def download(url):
    """ Return the contents of the URL in the argument. The `crawler_io`
        object will be used to cache the requests and keep track of redirect
        and error URLs.

        :param url: URL to download.

    """
    crawler_io = get_crawler_io()
    if crawler_io.is_error_url(url):
        return None
    text = crawler_io.load_str(url)
    if text:
        if get_config("CRAWLER_VERBOSE"):
            print "cached", url
    else:
        if get_config("CRAWLER_VERBOSE"):
            print "downloading", url
        try:
            response = requests.get(url)
        except Exception:
            crawler_io.add_error_url(url)
            return None
        if response.status_code >= 400:
            crawler_io.add_error_url(url)
            return None
        crawler_io.add_redirect(url, response.url)
        text = response.content
        crawler_io.save_str(url, text)
    return text
def package_component(component_name):
    package_common_lib()
    binary_dir = os.path.join(config.get_config()['package_dir'], 'temp')
    output_lib_dirs = os.path.join(config.get_config()['package_dir'], 'libs')
    package_dir = config.get_config()['package_dir']
    component_package_dir = os.path.join(config.get_config()['package_dir'], component_name)  
    if not os.path.exists(component_package_dir):
        os.makedirs(component_package_dir)

    if os.path.exists(binary_dir):
        shutil.rmtree(binary_dir)
    os.makedirs(binary_dir)

    build_dir = os.path.join(config.get_config()['build_dir'], component_name)
    if not os.path.exists(build_dir):
        print "Build dir does not exists"
        return

    shutil.copytree(os.path.join(build_dir, 'MainApp'), os.path.join(binary_dir, 'MainApp'))
    shutil.copytree(os.path.join(build_dir, 'plugins'), os.path.join(binary_dir, 'plugins'))
    os.symlink(output_lib_dirs, os.path.join(binary_dir, 'libs'))
    
    os.chdir(package_dir)
    call(['tar', 'cjf', os.path.join(component_package_dir, 
        "{0}_{1}.tar.gz".format(component_name, time.strftime('%y%m%d.%H%M'))),
        '-h', 'temp'])
    
    shutil.rmtree('temp')
示例#9
0
 def __init__(self):
     """Create the web-app daemon: build the app and resolve the listen
     port and pidfile location from configuration."""
     cur_path = os.path.dirname(os.path.abspath(__file__))
     self.app = webapp.create_app()
     # NOTE(review): debug mode is forced on here -- confirm this is
     # intended outside of development.
     self.app.debug = True
     self.port = get_config().getint("setup", "listen_port")
     # pidfile lives one level above this module, at the configured path.
     self.pidfile = os.path.join(cur_path, "..", get_config().get("setup", "pidpath"))
     Daemon.__init__(self, self.pidfile)
示例#10
0
文件: pump19.py 项目: pyrige/pump19
def main():
    """Start the Pump19 IRC client and command handler, run the event loop
    until the protocol finishes, then drain pending tasks and exit."""
    logger = logging.getLogger("pump19")
    logger.info("Pump19 started.")

    client_config = config.get_config("irc")
    client = protocol.Protocol(**client_config)
    loop = client.loop

    cmdhdl_config = config.get_config("cmd")
    # we don't need to remember this instance
    command.CommandHandler(client, loop=loop, **cmdhdl_config)

    def shutdown():
        # SIGTERM handler: ask the client to shut down cleanly.
        logger.info("Shutdown signal received.")
        client.shutdown()
    loop.add_signal_handler(signal.SIGTERM, shutdown)

    logger.info("Running protocol activity.")
    client.start()
    loop.run_forever()

    # before we stop the event loop, make sure all tasks are done
    # NOTE(review): asyncio.Task.all_tasks was deprecated in Python 3.7 and
    # removed in 3.9; newer code should use asyncio.all_tasks() instead.
    pending = asyncio.Task.all_tasks(loop)
    if pending:
        loop.run_until_complete(asyncio.wait(pending, timeout=5))

    loop.close()
    logger.info("Protocol activity ceased.")
    logger.info("Exiting...")
示例#11
0
def lisa_config_init():
    """
    Generate default config from function paramteres.
    Specific config given by command line argument is implemented in
    parser_init() function.
    """
    # Start from the defaults declared by OrganSegmentation.__init__.
    cfg = config.get_default_function_config(OrganSegmentation.__init__)

    # Extra parameters that OrganSegmentation does not know about, plus
    # values that should override its defaults.
    overrides = {
        'datapath': None,
        'viewermax': 225,
        'viewermin': -125,
        'output_datapath': os.path.expanduser("~/lisa_data"),
        'input_datapath_start': os.path.expanduser("~/lisa_data")
        # config_version':[1,1]
    }
    cfg.update(overrides)

    # Layer the site-wide config file on top of the defaults.
    cfg = config.get_config("organ_segmentation.config", cfg)

    user_config_path = os.path.join(
        cfg['output_datapath'], "organ_segmentation.config")
    config.check_config_version_and_remove_old_records(
        user_config_path, version=config_version,
        records_to_save=['experiment_caption', 'lisa_operator_identifier'])

    # Finally, apply the user's own config from their data directory.
    return config.get_config(user_config_path, cfg)
示例#12
0
    def __init__(self, args):
        """Windows service constructor: load configuration, set up the
        collector heartbeat watchdog, and build the supervised agent
        sub-processes."""
        win32serviceutil.ServiceFramework.__init__(self, args)
        # Event signalled by the service manager to request shutdown.
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
        config = get_config(parse_args=False)

        # Setup the correct options so the agent will use the forwarder
        opts, args = Values({
            'autorestart': False,
            'dd_url': None,
            'use_forwarder': True,
            'disabled_dd': False,
            'profile': False
        }), []
        agentConfig = get_config(parse_args=False, options=opts)
        self.hostname = get_hostname(agentConfig)

        # Watchdog for Windows
        # Pipe(False) is one-way: the collector writes heartbeats, the
        # service reads them.
        self._collector_heartbeat, self._collector_send_heartbeat = multiprocessing.Pipe(False)
        self._collector_failed_heartbeats = 0
        # Tolerated missed heartbeats scales with the check frequency.
        self._max_failed_heartbeats = \
            MAX_FAILED_HEARTBEATS * agentConfig['check_freq'] / SERVICE_SLEEP_INTERVAL

        # Watch JMXFetch restarts
        self._MAX_JMXFETCH_RESTARTS = 3
        self._count_jmxfetch_restarts = 0

        # Keep a list of running processes so we can start/end as needed.
        # Processes will be started in order and stopped in reverse order.
        self.procs = {
            'forwarder': ProcessWatchDog("forwarder", DDForwarder(config, self.hostname)),
            'collector': ProcessWatchDog("collector", DDAgent(agentConfig, self.hostname,
                                         heartbeat=self._collector_send_heartbeat)),
            'dogstatsd': ProcessWatchDog("dogstatsd", DogstatsdProcess(config, self.hostname)),
            'jmxfetch': ProcessWatchDog("jmxfetch", JMXFetchProcess(config, self.hostname), 3),
        }
示例#13
0
def modify_req(request, apikey):
    """Modify a project's settings (category, stage, platform, name, timezone).

    Only the project owner may modify the project.  Returns a JSON
    HttpResponse describing success or the reason for refusal.
    """
    username = request.user

    valid , message , userelement, projectelement = validUserPjt(username,apikey)

    if not valid:
        # Fixed: json.dupms -> json.dumps (AttributeError at runtime).
        return HttpResponse(json.dumps({'success' : False , 'message' : 'wrong access'}),'application/json')

    # Only the owner is allowed to modify the project.
    if(projectelement.owner_uid.id != userelement.id):
        return HttpResponse(json.dumps({'success' : False , 'message' : 'Only the owner'}),'application/json')

    # Lookup tables mapping display names to stored codes.
    stagedata = json.loads(get_config('app_stages'))
    categorydata = json.loads(get_config('app_categories'))
    platformdata = json.loads(get_config('app_platforms'))

    projectelement.category = categorydata[request.POST['category']]
    projectelement.stage = stagedata[request.POST['stage']]
    projectelement.platform = platformdata[request.POST['platform']]
    projectelement.name = request.POST['projectname']
    projectelement.timezone = request.POST['timezone']

    # Persist the modified project.
    projectelement.save()
    return HttpResponse(json.dumps({'success' : True , 'message' : 'success modify project'}),'application/json')
示例#14
0
 def clone_or_get(self,URL,dest):
     """Open the Git repo at *dest*, cloning it from *URL* first if it
     does not exist yet (and configuring user.name/user.email on clone)."""
     try:
         return GitConnect(dest)
     except IOError:
         # No repo at dest yet -- clone and configure a fresh one.
         GitConnect.clone(URL,dest)
         repo = GitConnect(dest)
         repo.repoConfig("user.name",get_config()["Git-Username"])
         repo.repoConfig("user.email",get_config()["Git-Email"])
         return repo
示例#15
0
def switch_version(arguments):
    """Point the deploy symlink for a component at a packaged version.

    arguments -- parsed CLI namespace providing .component and
                 .target_version.
    """
    # Read the configuration once instead of once per path.
    cfg = config.get_config()
    component = arguments.component
    component_package_dir = os.path.join(cfg['package_dir'], component)
    deploy_dir = cfg['deploy_dir']

    if not os.path.exists(deploy_dir):
        os.makedirs(deploy_dir)

    # NOTE: os.symlink fails with EEXIST if the link already exists.
    os.symlink(os.path.join(component_package_dir, arguments.target_version),
               os.path.join(deploy_dir, component))
示例#16
0
    def __init__(self, hostname, command):
        """Build an ipmitool chassis-power command for *hostname*.

        command -- power action appended to `chassis power`
                   (e.g. on / off / status).
        """
        #TODO: don't hard code this here
        # Some host families use a dedicated BMC user.
        if "gulpin" in hostname or 'dugtrio' in hostname or 'delcatty' in hostname:
            user = get_config("GULPIN_IMMUSER")
        else:
            user = get_config("IMMUSER")

        # NOTE(review): the BMC password is interpolated into the command
        # line, so it is visible in process listings; consider ipmitool's
        # -E / -f password options instead.
        command = "ipmitool -I lanplus -H %s -U %s -P '%s' chassis power %s" % \
            (hostname, user, get_config("CBMCPASSWD"), command)
        Command.__init__(self, command)
示例#17
0
def registration(request):
    """Register a new project for the logged-in user.

    Issues an API key, creates the Projects row, links the user as a
    viewer, and returns the new project's details as JSON.
    """
    # step 1: fetch the logged-in user's element
    try:
        userElement = AuthUser.objects.get(username=request.user)
    except ObjectDoesNotExist:
        return HttpResponse('user "%s" not exists' % request.user)

    # Lookup tables mapping display names to stored codes.
    categorydata = json.loads(get_config("app_categories"))
    platformdata = json.loads(get_config("app_platforms"))
    stagedata = json.loads(get_config("app_stages"))
    # stagecolordata = json.loads(get_config('app_stages_color'))
    # avgcolordata = json.loads(get_config('avg_error_score_color'))
    countcolordata = json.loads(get_config("error_rate_color"))

    name = request.POST["name"]
    platformtxt = request.POST["platform"]
    stagetxt = request.POST["stage"]
    categorytxt = request.POST["category"]

    platform = platformdata[platformtxt]
    stage = stagedata[stagetxt]
    category = categorydata[categorytxt]
    # Initial error-rate color for a project with zero errors.
    color = ErrorRate_for_color(countcolordata, 0)

    # Duplicate project names are allowed.

    # step 2: issue an API key (an 8-digit number)
    apikey = newApikey()
    print "new apikey = %s" % apikey
    projectElement = Projects(
        owner_uid=userElement,
        apikey=apikey,
        name=name,
        platform=platform,
        stage=stage,
        category=category,
        timezone="Asia/Seoul",
    )
    projectElement.save()
    # step 3: link the user and the project in the viewer table
    Viewer.objects.create(uid=userElement, pid=projectElement)

    return HttpResponse(
        json.dumps(
            {
                "success": True,
                "prjname": name,
                "apikey": apikey,
                "color": color,
                "platform": platformtxt,
                "stage": stagetxt,
            }
        ),
        "application/json",
    )
示例#18
0
def getSettingDict(projectelement,userelement):
    """Build the settings-page context dict for a project.

    Includes ownership flag, platform/stage/category selections (1-based
    indices for the UI), timezone list position, other viewers, and the
    project's Proguard map files (with formatted upload date/time).
    """
    # Whether the requesting user owns the project.
    isowner = projectelement.owner_uid.id == userelement.id

    categorydata = json.loads(get_config('app_categories'))
    platformdata = json.loads(get_config('app_platforms'))
    stagedata = json.loads(get_config('app_stages'))

    platformnum = get_dict_value_matchin_number(platformdata,projectelement.platform)
    categorynum = get_dict_value_matchin_number(categorydata,projectelement.category)
    stagenum = get_dict_value_matchin_number(stagedata,projectelement.stage)

    platformtxt = get_dict_value_matchin_key(platformdata,projectelement.platform)

    # Position of the project's timezone in pytz.common_timezones
    # (equals len(common_timezones) if not found).
    count = 0
    for zone in pytz.common_timezones:
        if zone == projectelement.timezone:
            break
        count += 1

    # Usernames of every viewer other than the requesting user.
    viewerlist = []
    ViewerElements = Viewer.objects.select_related().filter(
        ~Q(uid=userelement.id), pid=projectelement.pid)
    for v in ViewerElements:
        viewerlist.append(v.uid.username)

    # Renamed from `dict`, which shadowed the builtin.
    settings = {
        'platform_number' : platformnum + 1,
        'stage_number' : stagenum + 1,
        'category_number' : categorynum + 1,
        'timezonelist' : pytz.common_timezones,
        'timezone_number' : count + 1,
        'project_viewerlist' : viewerlist,
        'owner_user' : isowner,
        'app_platformlist' : platformdata.items(),
        'app_categorylist' : categorydata.items(),
        'app_stagelist' : stagedata.items(),
        'project_name' : projectelement.name,
        'project_platform' : platformtxt,
        'map_file_list' : Proguardmap.objects.filter(pid = projectelement),
    }
    # Attach display-formatted upload date/time in the project's timezone.
    for mapfile in settings['map_file_list']:
        mapfile.date = toTimezone(mapfile.uploadtime,projectelement.timezone).__format__('%Y.%m.%d')
        mapfile.time = toTimezone(mapfile.uploadtime,projectelement.timezone).__format__('%H:%M')
    return settings
示例#19
0
    def __init__(self, args):
        """Windows service constructor: build the forwarder, dogstatsd and
        pup workers plus the collector (routed through the forwarder)."""
        win32serviceutil.ServiceFramework.__init__(self, args)
        # Event signalled by the service manager to request shutdown.
        self.hWaitStop = win32event.CreateEvent(None, 0, 0, None)
        config = get_config(parse_args=False)
        self.forwarder = DDForwarder(config)
        self.dogstatsd = DogstatsdThread(config)
        self.pup = PupThread(config)

        # Force the collector to ship its data through the local forwarder.
        opts, args = Values({
            "dd_url": None,
            "clean": False,
            "use_forwarder": True,
            "disabled_dd": False
        }), []
        agentConfig = get_config(parse_args=False, options=opts)
        self.agent = DDAgent(agentConfig)
def package_common_lib():
    """Copy the program's shared libraries and the bundled third-party
    x64 libraries into the package 'libs' directory (created on demand)."""
    # Read the configuration once instead of once per path.
    cfg = config.get_config()
    program_lib_dirs = os.path.join(cfg['source_dir'], 'libs')
    thirdparty_lib_dirs = os.path.join(cfg['source_dir'],
                                       'Thirdparty', 'lib', 'x64')
    output_lib_dirs = os.path.join(cfg['package_dir'], 'libs')
    if not os.path.exists(output_lib_dirs):
        os.makedirs(output_lib_dirs)
    # copy2 preserves file metadata (mtime, permissions), unlike copy.
    for src_dir in (program_lib_dirs, thirdparty_lib_dirs):
        for filename in glob.glob(os.path.join(src_dir, "*")):
            shutil.copy2(filename, output_lib_dirs)
示例#21
0
def send_confirmation_mail(username, email):
    """Send the account-confirmation e-mail to a newly registered user."""
    sender_addr = "%s Admin <%s>" % (config.get_config("site_name"),
                                     config.get_config("admin_email"))
    recipient = "%s <%s>" % (username, email)
    body_text = _(
        """Hello %(name)s,
            Thanks for registering on our site.
            Now, click this link %(link)s to confirm your account.
            """
    ) % {"name": username, "link": generate_confirm_link(username)}
    mail.send_mail(
        sender=sender_addr,
        to=recipient,
        subject="Confirmation mail",
        body=body_text,
    )
def ImageDisk(server_uuid, diskIndex, customerUUID, customerUsername, customerPassword, endpoint, default_user="******", isVerbose=False):
    """Image a server's disk via the provider API and return the image state.

    NOTE(review): diskIndex and isVerbose are accepted but unused here, and
    the disk index passed to image_disk is hard-coded to 0 -- confirm intent.
    The default_user default looks redacted ('******').
    """
    # Actually just defines the global variables now (since all config bits are passed on the command line)
    config.get_config("")

    # Inject per-call credentials into the module-level config globals.
    config.CUST_UUID = customerUUID
    config.USER_LOGIN = customerUsername
    config.USER_PASSWORD = customerPassword
    config.HOST_NAME = endpoint

    auth_client = setup()

    image_state = image_disk(auth_client, server_uuid, default_user, 0)
    return image_state
示例#23
0
def get_crawler_io():
    """ Return the instance that handles crawler I/O functionality for
        caching and keeping track of redirect and error URLs.

        Is a `IoRethinkdb` instance if CRAWLER_USE_RETHINK is True else `IoFs`.

    """
    global crawler_io
    # Lazily create the singleton on first use.
    if crawler_io is None:
        use_rethink = get_config("CRAWLER_USE_RETHINK")
        if use_rethink:
            crawler_io = IoRethinkdb(get_config("DB_HOST"), get_config("DB_PORT"))
        else:
            crawler_io = IoFs(get_config("CRAWLER_PATH"))
    return crawler_io
示例#24
0
def StopVM(server_uuid, customerUUID, customerUsername, customerPassword, endpoint, isVerbose=False):
    """Stop the given server via the provider API and return its state.

    NOTE(review): isVerbose is accepted but unused here.
    """
    # Actually just defines the global variables now (since all config bits are passed on the command line)
    config.get_config("")

    # Inject per-call credentials into the module-level config globals.
    config.CUST_UUID = customerUUID
    config.USER_LOGIN = customerUsername
    config.USER_PASSWORD = customerPassword
    config.HOST_NAME = endpoint
    # config.NETWORK_TYPE  = networkType

    auth_client = setup()
    server_state = stop_server(auth_client, server_uuid)
    return server_state
示例#25
0
文件: RunGTlike.py 项目: cdeil/enrico
def run(infile):
    """Run an entire Fermi analysis (spectrum) by reading a config file.

    Performs the fit, dumps results, optionally plots the SED / model map,
    and optionally re-runs the analysis per energy bin (directly or as
    cluster jobs).  Indentation normalized: the original mixed a tab into
    the block at the ResultPlots branch, which is a TabError on Python 3.
    """
    config = get_config(infile)
    folder = config['out']
    os.system('mkdir -p ' + folder)

    runfit, Fit = GenAnalysisObject(config)
    # create all the fit files and run gtlike
    runfit.PerformFit(Fit)

    if config['verbose'] == 'yes':
        utils.GetFluxes(Fit, runfit.obs.Emin, runfit.obs.Emax)  # print the flux of all the sources

    Result = runfit.GetAndPrintResults(Fit)  # Get and dump the target specific results
    utils.DumpResult(Result, config)

    # plot the SED and model map if possible and asked
    if config['Spectrum']['ResultPlots'] == 'yes':
        os.system("mkdir -p " + config['out'] + '/Spectrum/')
        if float(config['UpperLimit']['TSlimit']) < Fit.Ts(config['target']['name']):
            runfit.ComputeSED(Fit)
        outXml = utils._dump_xml(config)
        # NOTE(review): SummedLike, runfitback and runfitfront are not defined
        # in this function -- presumably module-level globals; verify.
        if SummedLike == 'yes':  # the possiblity of making the model map is checked inside the function
            runfitback.ModelMap(outXml)
            runfitfront.ModelMap(outXml)
        else:
            runfit.ModelMap(outXml)

    # Make energy bins by run a *new* analysis
    Nbin = config['Ebin']['NumEnergyBins']
    if int(Nbin) > 0:
        configfiles = utils.PrepareEbin(Fit, runfit)
        ind = 0
        enricodir = environ.DIRS.get('ENRICO_DIR')
        fermidir = environ.DIRS.get('FERMI_DIR')
        for conf in configfiles:
            pathconf = folder + "/Ebin" + str(Nbin) + "/" + conf
            Newconfig = get_config(pathconf)
            cmd = enricodir + "/enrico/RunGTlike.py " + pathconf
            if Newconfig['Ebin']['Submit'] == 'no':  # run directly
                os.system(cmd)
            else:  # submit a job to a cluster
                prefix = Newconfig['out'] + "/Ebin" + str(ind)
                scriptname = prefix + "_Script.sh"
                JobLog = prefix + "_Job.log"
                JobName = (Newconfig['target']['name'] + "_" +
                           Newconfig['analysis']['likelihood'] +
                           "_Ebin_" + str(ind) + "_" + Newconfig['file']['tag'])
                call(cmd, enricodir, fermidir, scriptname, JobLog, JobName)  # submission
            ind += 1
def init(config_path=None, use_watchdog=False, use_forwarder=False):
    """Build the dogstatsd Reporter and Server from configuration and
    return them as a (reporter, server) pair."""
    c = get_config(parse_args=False, cfg_path=config_path, init_logging=True)

    logger.debug("Configuration dogstatsd")

    port = c['dogstatsd_port']
    interval = int(c['dogstatsd_interval'])
    normalize = c['dogstatsd_normalize']
    api_key = c['api_key']

    # Ship through the local forwarder when requested, otherwise direct.
    target = c['dogstatsd_target'] if use_forwarder else c['dd_url']

    hostname = gethostname(c)

    # The aggregator is the shared channel between the server and the
    # reporting thread.  Normalizing divides counts by the flush interval.
    normalization_factor = (1.0 / interval) if normalize else 1.0
    aggregator = MetricsAggregator(hostname, normalization_factor)

    # Reporting thread.
    reporter = Reporter(interval, aggregator, target, api_key, use_watchdog)

    # Server bound to all interfaces.
    server_host = ''
    server = Server(aggregator, server_host, port)

    return reporter, server
示例#27
0
def init(config_path=None, use_watchdog=False, use_forwarder=False):
    """Build the dogstatsd Reporter and Server from configuration and
    return them as a (reporter, server) pair."""
    c = get_config(parse_args=False, cfg_path=config_path)
    log.debug("Configuration dogstatsd")

    port = c['dogstatsd_port']
    interval = int(c['dogstatsd_interval'])
    # Read for config validation parity (raises KeyError if absent); the
    # value itself is not used in this variant.
    normalize = c['dogstatsd_normalize']
    api_key = c['api_key']
    non_local_traffic = c['non_local_traffic']

    # Ship through the local forwarder when requested, otherwise direct.
    target = c['dogstatsd_target'] if use_forwarder else c['dd_url']

    hostname = get_hostname(c)

    # The aggregator is the shared channel between the server and the
    # reporting thread.  The flush interval must be positive.
    assert 0 < interval
    aggregator = MetricsAggregator(
        hostname, interval,
        recent_point_threshold=c.get('recent_point_threshold', None))

    # Reporting thread.
    reporter = Reporter(interval, aggregator, target, api_key, use_watchdog)

    # IPv4 server: loopback only unless non-local traffic is allowed.
    server_host = '' if non_local_traffic else '127.0.0.1'
    server = Server(aggregator, server_host, port)

    return reporter, server
示例#28
0
文件: lc.py 项目: CityHawk/lc-tools
def get_lc(profile):
    """Return a libcloud connection built from the given profile's config."""
    conf = get_config(profile)

    # Resolve the provider constant from the configured driver name.
    provider = getattr(Provider, conf.get('driver').upper())
    driver_cls = get_driver(provider)

    return driver_cls(conf.get('access_id'), conf.get('secret_key'))
示例#29
0
 def __init__(self):
     """Scan the configured base directory for PDF files and build the
     in-memory index (self.data) plus summary metadata (self.meta_data)."""
     import os
     from config import get_config
     cfg = get_config()
     self.base = cfg['base_dir']
     self.MAX_RATING = cfg['max_rating']
     # One dict per discovered PDF.
     self.data = []
     # self.data_with_links = []
     for root, dirs, files in os.walk(self.base):
         for file in files:
             if file.endswith(".pdf") or file.endswith(".PDF"):
                 # Path relative to the base dir (keeps the index portable).
                 relative_root = root[len(self.base):]
                 file_info = self.__get_file_info(relative_root, file, self.MAX_RATING)
                 self.data.append(
                     {
                         # 'full_path': os.path.join(root, file),
                         # Forward slashes so paths work as URLs on Windows too.
                         'path': os.path.join(relative_root, file).replace('\\', '/'),
                         'type': 'pdf',
                         'rating': file_info['rating'],
                         'year': file_info['year'],
                         'author': file_info['author'],
                         'publisher': file_info['publisher'],
                         'citations': file_info['citations'],
                         'title': file_info['title']
                     }
                 )
                 # test
                 if not file_info['title']:
                     print(self.data[-1])
     self.meta_data = {
         'length': len(self.data),
         'base': self.base
     }
示例#30
0
def get_person_from_config():
    """ get the username value for Ubuntu SSO from the config file
    """
    cfg = get_config()
    if not cfg.has_option("reviews", "username"):
        # No username configured.
        return None
    return cfg.get("reviews", "username")
def trainWithGan(mode, dis_mode, dataset, epochs, loss='l1Loss', gan_loss='vanllia', op='momentum', lr=1e-2, batch_size=4,
                 load_gen_model=None, load_dis_model=None, save_dir=None, source=False, start_index=0, LAMBDA=1):
    """Adversarially train a depth-estimation generator against a
    conditional discriminator (pix2pix-style: D sees image+depth pairs).

    mode / dis_mode -- generator / discriminator architecture names.
    dataset         -- dataset name ('Make3D' or 'NyuV2' get depth masks).
    loss            -- name of the reconstruction loss.
    LAMBDA          -- weight of the reconstruction term in the G loss.
    Checkpoints are written to save_dir once per epoch.

    NOTE(review): the gan_loss parameter is ignored; 'lsganLoss' is
    hard-coded below -- confirm intent.
    """
    device = torch.device('cuda:0')

    # Build generator and discriminator.
    netG = get_model(mode, dataset, source)
    netG.to(device)
    netD = get_discriminator(dis_mode, dataset)
    netD.to(device)

    # Resume from checkpoints when provided.
    if load_gen_model is not None:
        netG.load_state_dict(torch.load(load_gen_model))
    if load_dis_model is not None:
        netD.load_state_dict(torch.load(load_dis_model))

    netG.train()
    netD.train()

    # Reconstruction loss.
    loss_normal = get_loss(loss)
    loss_normal.to(device)

    # Adversarial loss (hard-coded LSGAN, see docstring note).
    loss_gan = get_loss('lsganLoss')
    loss_gan.to(device)

    optimD = torch.optim.Adam(netD.parameters(), lr=5e-5, betas=(0.5, 0.999))
    optimG = get_optim(netG, op, lr)

    train_count = int(get_config(dataset, 'train_count'))
    total_it = math.ceil(train_count * epochs / batch_size)
    epoch = train_count // batch_size  # iterations per epoch

    if save_dir is not None:
        if not os.path.exists(save_dir):
            os.mkdir(save_dir)

    for i in range(total_it):
        batch_list = list(np.random.randint(1, train_count, size=[batch_size]))

        images, depths = get_train_data(dataset, batch_list)
        images = torch.from_numpy(images).cuda().float()
        depths = torch.from_numpy(depths).cuda()

        # Valid-pixel mask; depth bounds depend on the dataset's range.
        # Fixed: `is` compared string *identity*, not equality -- use `==`.
        mask = torch.tensor(depths)
        if dataset == 'Make3D':
            mask = (depths > 0.0) & (depths < 70.0)
        elif dataset == 'NyuV2':
            mask = (depths > 0.0) & (depths < 10.0)

        # Generator's depth prediction (the "fake" sample).
        fake_predict = netG(images)
        fake_predict = torch.unsqueeze(fake_predict, 1)

        # ---- Discriminator update ----
        set_required_grad(netD, True)
        optimD.zero_grad()

        fake_predict_temp = torch.cat([images, fake_predict.detach()], 1)
        image_depth_pair = torch.cat([images, torch.unsqueeze(depths, 1)], 1)

        # fake: score the detached generator output.
        pred_fake = netD(fake_predict_temp.detach())
        # Fixed: the fake loss must be computed on the discriminator's
        # prediction (pred_fake), not on the generator output itself
        # (pred_fake was previously assigned but never used).
        loss_D_fake = loss_gan(pred_fake, 0.)

        # real
        real_predict = netD(image_depth_pair)
        loss_D_real = loss_gan(real_predict, 1.)

        lossD = (loss_D_fake + loss_D_real) * 0.5
        lossD.backward(retain_graph=True)
        optimD.step()

        # ---- Generator update ----
        set_required_grad(netD, False)
        optimG.zero_grad()

        fake_predict_temp = torch.cat([images, fake_predict], 1)
        pred_fake = netD(fake_predict_temp)
        loss_G_GAN = loss_gan(pred_fake, 1.)
        loss_l1 = loss_normal(torch.squeeze(fake_predict, 1), depths, mask)
        loss_G = loss_G_GAN + loss_l1 * LAMBDA
        loss_G.backward()
        optimG.step()

        if i % 100 == 0:
            print (i, loss_G_GAN.cpu().detach().numpy(), loss_l1.cpu().detach().numpy())

        # Checkpoint once per epoch.
        if i % epoch == epoch - 1:
            torch.save(netG.state_dict(), '{}/gen{}.pkl'.format(save_dir, start_index + i // epoch))
            torch.save(netD.state_dict(), '{}/dis{}.pkl'.format(save_dir, start_index + i // epoch))
    pass
            "scaling_factor": self.scaling_factor,
            "base_metric": self.base_metric
        }

        try:
            self.resultdb[self.results_collection].insert_one(
                sizing_result_doc)
        except Exception as e:
            self.logger.error(
                "Unable to store sizing result doc in MongoDB: %s" %
                str(sizing_result_doc))
            raise Exception("Unable to store sizing result doc in MongoDB: " +
                            str(e))


if __name__ == "__main__":
    # Entry point: run the full sizing analysis over a fixed 300-second
    # window ending at a hard-coded timestamp (nanoseconds since the epoch).
    config = get_config()
    sa = SizingAnalyzer(config)
    # NOTE(review): hard-coded sample end time -- presumably for a demo run.
    end_time = 1517016459493000000
    ANALYSIS_WINDOW_SECOND = 300
    start_time = end_time - ANALYSIS_WINDOW_SECOND * NANOSECONDS_PER_SECOND

    status = sa.analyze_node_cpu(start_time, end_time)
    print("Node cpu finished with status: " + str(status.to_dict()))
    status = sa.analyze_node_memory(start_time, end_time)
    print("Node memory finished with status: " + str(status.to_dict()))
    status = sa.analyze_container_cpu(start_time, end_time)
    print("Container cpu finished with status: " + str(status.to_dict()))
    status = sa.analyze_container_memory(start_time, end_time)
    print("Container memory finished with status: " + str(status.to_dict()))
示例#33
0
            mse = mean_squared_error(y_true=y_test, y_pred=y_hat)
            mae = mean_absolute_error(y_test, y_hat)
            errors[i, 0] = mse
            errors[i, 1] = mae

        mean_cv_err = np.round(np.mean(errors, axis=0), 4)
        std_cv_err = np.round(np.std(errors, axis=0), 4)

        return mean_cv_err[0], std_cv_err[0], mean_cv_err[1], std_cv_err[
            1], errors


if __name__ == "__main__":

    # Entry point: evaluate house-price prediction baselines on the region grid.
    c = get_config()
    region_grid = RegionGrid(config=c)
    region_grid.load_weighted_mtx()
    region_grid.load_housing_data(c['housing_data_file'])

    y_house = region_grid.get_target_var("house_price")
    results = []

    # Adjacency Average Model
    mse, mse_std, mae, mae_std, err_adj = cv_adj_mean(
        region_grid.regions, region_grid.matrix_idx_map, y_house)
    results.append(['adjacent avg', mse, mse_std, mae, mae_std])

    # Keep only regions with a non-NaN target value.
    y_is_valid = np.where(~np.isnan(y_house))[0]
    y_house = y_house[y_is_valid]
示例#34
0
 def __init__(self):
     """Load database connection settings from the [database] config section."""
     self.host = config.get_config("database", "host")
     # Port is stored as a string in the config file.
     self.port = int(config.get_config("database", "port"))
     self.user = config.get_config("database", "user")
     self.pwd = config.get_config("database", "pwd")
示例#35
0
def main():
    """Evaluate a pretrained multi-agent policy on MPE simple-spread episodes.

    Loads a saved actor-critic checkpoint, rolls out ``args.eval_episodes``
    episodes with deterministic actions, prints the mean cover rate of the
    last 5 steps of each episode, and optionally saves rendered frames as
    GIFs under ``<model_dir>/run<seed>/eval/gifs``.
    """
    args = get_config()

    # cuda
    if args.cuda and torch.cuda.is_available():
        device = torch.device("cuda:0")
        torch.set_num_threads(1)
    else:
        device = torch.device("cpu")
        torch.set_num_threads(args.n_training_threads)

    # Recreate a clean eval output directory (logs + gifs).
    run_dir = Path(args.model_dir) / ("run" + str(args.seed)) / 'eval'
    if os.path.exists(run_dir):
        shutil.rmtree(run_dir)
        os.mkdir(run_dir)
    log_dir = run_dir / 'logs'
    os.makedirs(str(log_dir))
    logger = SummaryWriter(str(log_dir))
    gifs_dir = run_dir / 'gifs'
    os.makedirs(str(gifs_dir))

    num_agents = args.num_agents

    # NOTE(review): hard-coded checkpoint path — consider promoting to a CLI arg.
    actor_critic = torch.load(
        '/home/tsing73/curriculum/results/MPE/simple_spread/homework/run4/models/agent_model.pt'
    )['model'].to(device)
    # Evaluation is pinned to the 2-agent / 2-box task regardless of args.
    actor_critic.agents_num = 2
    actor_critic.boxes_num = 2
    num_agents = 2
    num_boxes = 2
    all_frames = []
    cover_rate = 0
    random.seed(1)
    np.random.seed(1)

    # load files (archive of starting states, if present on disk)
    dir_path = '/home/chenjy/curriculum/diversified_left/' + ('archive_' +
                                                              str(89))
    if os.path.exists(dir_path):
        with open(dir_path, 'r') as fp:
            archive = fp.readlines()
        for i in range(len(archive)):
            archive[i] = np.array(archive[i][1:-2].split(), dtype=float)

    starts = produce_good_case_grid_pb(500, [-0.6, 0.6, -0.6, 0.6], num_agents,
                                       num_boxes)
    for eval_episode in range(args.eval_episodes):
        print(eval_episode)
        eval_env = MPEEnv(args)
        if args.save_gifs:
            image = eval_env.render('rgb_array', close=False)[0]
            all_frames.append(image)

        eval_obs, _ = eval_env.reset(num_agents)
        eval_obs = np.array(eval_obs)
        eval_share_obs = eval_obs.reshape(1, -1)
        eval_recurrent_hidden_states = np.zeros(
            (num_agents, args.hidden_size)).astype(np.float32)
        eval_recurrent_hidden_states_critic = np.zeros(
            (num_agents, args.hidden_size)).astype(np.float32)
        eval_masks = np.ones((num_agents, 1)).astype(np.float32)
        step_cover_rate = np.zeros(shape=(args.episode_length))

        for step in range(args.episode_length):
            calc_start = time.time()
            eval_actions = []
            for agent_id in range(num_agents):
                if args.share_policy:
                    actor_critic.eval()
                    _, action, _, recurrent_hidden_states, recurrent_hidden_states_critic = actor_critic.act(
                        agent_id,
                        torch.FloatTensor(eval_share_obs),
                        torch.FloatTensor(eval_obs[agent_id].reshape(1, -1)),
                        torch.FloatTensor(
                            eval_recurrent_hidden_states[agent_id]),
                        torch.FloatTensor(
                            eval_recurrent_hidden_states_critic[agent_id]),
                        torch.FloatTensor(eval_masks[agent_id]),
                        None,
                        deterministic=True)
                else:
                    actor_critic[agent_id].eval()
                    _, action, _, recurrent_hidden_states, recurrent_hidden_states_critic = actor_critic[
                        agent_id].act(
                            agent_id,
                            torch.FloatTensor(eval_share_obs),
                            torch.FloatTensor(eval_obs[agent_id]),
                            torch.FloatTensor(
                                eval_recurrent_hidden_states[agent_id]),
                            torch.FloatTensor(
                                eval_recurrent_hidden_states_critic[agent_id]),
                            torch.FloatTensor(eval_masks[agent_id]),
                            None,
                            deterministic=True)
                eval_actions.append(action.detach().cpu().numpy())
                eval_recurrent_hidden_states[
                    agent_id] = recurrent_hidden_states.detach().cpu().numpy()
                eval_recurrent_hidden_states_critic[
                    agent_id] = recurrent_hidden_states_critic.detach().cpu(
                    ).numpy()
            # rearrange action: convert discrete action indices to one-hot vectors
            eval_actions_env = []
            for agent_id in range(num_agents):
                one_hot_action = np.zeros(eval_env.action_space[0].n)
                one_hot_action[eval_actions[agent_id][0]] = 1
                eval_actions_env.append(one_hot_action)
            # BUG FIX: removed a leftover pdb.set_trace() that halted every step.
            # Obser reward and next obs
            eval_obs, eval_rewards, eval_dones, eval_infos, _ = eval_env.step(
                eval_actions_env)
            step_cover_rate[step] = eval_infos[0]['cover_rate']
            eval_obs = np.array(eval_obs)
            eval_share_obs = eval_obs.reshape(1, -1)

            if args.save_gifs:
                image = eval_env.render('rgb_array', close=False)[0]
                all_frames.append(image)
                calc_end = time.time()
                elapsed = calc_end - calc_start
                if elapsed < args.ifi:
                    # BUG FIX: was `ifi - elapsed` (NameError) — the frame
                    # interval lives on args.
                    time.sleep(args.ifi - elapsed)
        print('cover_rate: ', np.mean(step_cover_rate[-5:]))
        cover_rate += np.mean(step_cover_rate[-5:])
        if args.save_gifs:
            gif_num = 0
            imageio.mimsave(str(gifs_dir / args.scenario_name) +
                            '_%i.gif' % gif_num,
                            all_frames,
                            duration=args.ifi)
    print('average_cover_rate: ', cover_rate / args.eval_episodes)
    eval_env.close()
示例#36
0
 def get_config(self, name, parse_args=False):
     """
     Small helper to load a fixture config by file name from CONFIG_FOLDER.
     """
     cfg_path = os.path.join(self.CONFIG_FOLDER, name)
     return get_config(cfg_path=cfg_path, parse_args=parse_args)
示例#37
0
def log(log_list):
    """Append a UTC-timestamped, sanitized entry to the configured log file."""
    timestamp = time.strftime('%Y-%m-%dT%H:%M:%S ', time.gmtime())
    entry = timestamp + join_and_sanitize(log_list) + '\n'
    with open(config.get_config('log_path'), 'a') as log_file:
        log_file.write(entry)
示例#38
0
def test(DATASET="Texas"):
    """Load the dataset's config and print every evaluation input image."""
    cfg = get_config(DATASET)
    _, _, evaluation_set, _ = datasets.fetch(DATASET, **cfg)
    detector = ChangeDetector(**cfg)
    detector.print_all_input_images(evaluation_set)
示例#39
0
from sqlalchemy import create_engine, func
from orm_schema import Pool, Host, Area
from sqlalchemy.engine.url import URL
from sqlalchemy.orm import sessionmaker
from config import get_config
from orm_io import dbIO
import pika, json
from threading import Thread

# Deployment settings loaded once at import time from conf.json.
conf_dict = get_config('conf.json')
db = conf_dict['database']  # database host (used below as the Postgres host)
broker = conf_dict['broker']  # message-broker address — presumably for pika; usage not shown here


def choose_Host(cpu, memory, area):
    postgres_db = {
        'drivername': 'postgres',
        'username': '******',
        'password': '******',
        'host': db,
        'port': 5432,
        'database': 'monitor'
    }
    uri = URL(**postgres_db)
    engine = create_engine(uri)
    Session = sessionmaker(bind=engine)
    session = Session()
    io = dbIO(db)
    a = io.query(Area, area_name=area)[0]
    m = memory
    q = session.query(Host).filter(Host.host_free_memory >= m,
示例#40
0
from pycnic.core import WSGI, Handler
import json
import yaml

from model import database_init
from auth import requires_auth, change_auth
from config import get_config
from core.ngrokmanager import NgrokManager
from core.error import TunnelInstanceError, TunnelManagerError

NM = NgrokManager(get_config('basic', 'docker_url'))


class Info(Handler):
    """Authenticated read-only endpoint exposing the service version."""

    @requires_auth()
    def get(self):
        version = get_config('basic', 'version')
        return {"version": version}


class Token(Handler):
    def options(self):
        set_options(self)
        return ''

    def patch(self):
        old = self.request.data.get('old_token', '')
        new = self.request.data.get('new_token', '')

        if not new:
示例#41
0
 def get(self):
     """Return the configured service version as a JSON-serializable dict."""
     return dict(version=get_config('basic', 'version'))
示例#42
0
 def __init__(self):
     """Load and initialize the corpus and configuration; no model yet."""
     corpus = get_corpus()
     corpus.initialize()
     self.corpus = corpus
     self.config = get_config()
     self.model = None  # built later, outside the constructor
示例#43
0
def api_requests_frequency():
    """Interval between API requests, in seconds, taken from the config."""
    cfg = get_config()
    return float(cfg["requests_frequency"])  # sec
示例#44
0
from config import get_config
from Learner import face_learner
from data.data_pipe import get_val_pair
from torchvision import transforms as trans

# Inference-only configuration: build the learner and load pretrained weights.
conf = get_config(training=False)
learner = face_learner(conf, inference=True)
learner.load_state(conf, 'ir_se50.pth', model_only=True)

# LFW evaluation
lfw, lfw_issame = get_val_pair(conf.emore_folder, 'lfw')
accuracy, best_threshold, roc_curve_tensor = learner.evaluate(conf,
                                                              lfw,
                                                              lfw_issame,
                                                              nrof_folds=10,
                                                              tta=False)
# BUG FIX: corrected 'accuray' typo in the user-facing report string.
print('lfw - accuracy:{}, threshold:{}'.format(accuracy, best_threshold))
# NOTE(review): the PIL image below is built but discarded; assign it and
# call .show()/.save() if the ROC curve is actually wanted.
trans.ToPILImage()(roc_curve_tensor)
示例#45
0
from urlparse import urlparse
import logging
import memcache
import config as conf

from py2neo import Graph
from py2neo import neo4j
from py2neo.packages.httpstream import http
from py2neo.packages.httpstream import SocketError

import create_task

application = Flask(__name__)
# NOTE(review): debug mode should be disabled for production deployments.
application.debug = True

# Session signing key and storage backend come from deployment config.
application.secret_key = conf.get_config('SESSION_KEY_SECRET')
application.config['SESSION_TYPE'] = 'filesystem'

oauth = OAuth()

# Twitter OAuth 1.0a endpoints; consumer credentials come from config.
twitter = oauth.remote_app('twitter',
    base_url='https://api.twitter.com/1/',
    request_token_url='https://api.twitter.com/oauth/request_token',
    access_token_url='https://api.twitter.com/oauth/access_token',
    authorize_url='https://api.twitter.com/oauth/authenticate',
    consumer_key=conf.get_config('TWITTER_CONSUMER_KEY'),
    consumer_secret=conf.get_config('TWITTER_CONSUMER_SECRET'),
    access_token_method='POST'
)

# Silence noisy py2neo cypher logging.
logging.getLogger("py2neo.cypher").setLevel(logging.CRITICAL)
示例#46
0
 def __init__(self):
     """Cache the IDEX API settings and the general configuration."""
     api_cfg = config.get_idex_api_config()
     self.api_config = api_cfg
     self.configs = config.get_config()
示例#47
0
    def __init__(
        self,
        sqlexecute=None,
        prompt=None,
        logfile=None,
        auto_vertical_output=False,
        warn=None,
        liteclirc=None,
    ):
        """Build the interactive shell: load config, then set up output
        formatting, audit logging, prompts, and the SQL completer.

        Args:
            sqlexecute: Optional pre-built SQL execution backend.
            prompt: Prompt format string; overrides config when given.
            logfile: Open file object for query logging, or None.
            auto_vertical_output: Force vertical output regardless of config.
            warn: Override for the destructive-warning config flag.
            liteclirc: Path to the config file to load.
        """
        self.sqlexecute = sqlexecute
        self.logfile = logfile

        # Load config.
        c = self.config = get_config(liteclirc)

        self.multi_line = c["main"].as_bool("multi_line")
        self.key_bindings = c["main"]["key_bindings"]
        special.set_favorite_queries(self.config)
        self.formatter = TabularOutputFormatter(
            format_name=c["main"]["table_format"])
        self.formatter.litecli = self
        self.syntax_style = c["main"]["syntax_style"]
        self.less_chatty = c["main"].as_bool("less_chatty")
        self.cli_style = c["colors"]
        self.output_style = style_factory_output(self.syntax_style,
                                                 self.cli_style)
        self.wider_completion_menu = c["main"].as_bool("wider_completion_menu")
        # Explicit `warn` argument wins over the config file when provided.
        c_dest_warning = c["main"].as_bool("destructive_warning")
        self.destructive_warning = c_dest_warning if warn is None else warn
        self.login_path_as_host = c["main"].as_bool("login_path_as_host")

        # read from cli argument or user config file
        self.auto_vertical_output = auto_vertical_output or c["main"].as_bool(
            "auto_vertical_output")

        # audit log: opened only when no explicit logfile was passed; on
        # failure, warn once and set logfile to False (logging disabled)
        # rather than aborting startup.
        if self.logfile is None and "audit_log" in c["main"]:
            try:
                self.logfile = open(os.path.expanduser(c["main"]["audit_log"]),
                                    "a")
            except (IOError, OSError):
                self.echo(
                    "Error: Unable to open the audit log file. Your queries will not be logged.",
                    err=True,
                    fg="red",
                )
                self.logfile = False

        self.completion_refresher = CompletionRefresher()

        self.logger = logging.getLogger(__name__)
        self.initialize_logging()

        # Prompt precedence: constructor arg > my.cnf > config > default.
        prompt_cnf = self.read_my_cnf_files(["prompt"])["prompt"]
        self.prompt_format = (prompt or prompt_cnf or c["main"]["prompt"]
                              or self.default_prompt)
        self.prompt_continuation_format = c["main"]["prompt_continuation"]
        keyword_casing = c["main"].get("keyword_casing", "auto")

        self.query_history = []

        # Initialize completer.
        self.completer = SQLCompleter(
            supported_formats=self.formatter.supported_formats,
            keyword_casing=keyword_casing,
        )
        self._completer_lock = threading.Lock()

        # Register custom special commands.
        self.register_special_commands()

        self.prompt_app = None  # created later when the REPL starts
示例#48
0
from tfutils import biGRU

from modules import attention_decoder
from modules import highway_network
from modules import conv1d_banks
from modules import prenet

from utils import spectrogram2wav

import tensorflow as tf
import numpy as np

__AUTHOR__ = "kozistr"
__VERSION__ = "0.1"

# Configuration is parsed once at import time; the seeding below depends on it.
cfg, _ = get_config()  # configuration

# set random seed for both numpy and tensorflow, for reproducibility
np.random.seed(cfg.seed)
tf.set_random_seed(cfg.seed)


class Tacotron:
    def __init__(self,
                 sess,
                 mode="train",
                 sample_rate=22050,
                 vocab_size=251,
                 embed_size=256,
                 n_mels=80,
                 n_fft=2048,
示例#49
0
文件: vxapi.py 项目: vaginessa/VxAPI
def main():
    """CLI entry point for the VxWebService API connector.

    Loads the local config, validates the API key against the webservice,
    builds an argparse tree restricted to the key's auth level, dispatches
    the chosen action, and prints the result. All errors are reported with
    a traceback instead of propagating.
    """
    try:
        if os.path.exists('config.py'):
            from config import get_config
            config = get_config()
        else:
            # BUG FIX: the template/config file names are Python files (.py),
            # not .pl as the old message claimed.
            raise MissingConfigurationError('Configuration is missing. Before running CLI, please copy the file \'config_tpl.py\' from current dir, rename it to \'config.py\', and fill')

        program_name = 'VxWebService Python API Connector'
        program_version = __version__
        vxapi_cli_headers = {'User-agent': 'VxApi CLI Connector'}

        # Normalize the server URL (strip a trailing slash).
        if config['server'].endswith('/'):
            config['server'] = config['server'][:-1]

        map_of_available_actions = OrderedDict([
            (ACTION_GET_API_LIMITS, CliApiLimits(ApiApiLimits(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_ENVIRONMENTS, CliEnvironments(ApiEnvironments(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_FEED, CliFeed(ApiFeed(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_RELATIONSHIPS, CliRelationships(ApiRelationships(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_RESULT, CliResult(ApiResult(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_PUBLIC_RESULT, CliResultPublic(ApiResultPublic(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_SAMPLE_DROPPED_FILES, CliSampleDroppedFiles(ApiSampleDroppedFiles(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_SAMPLE_SCREENSHOTS, CliSampleScreenshots(ApiSampleScreenshots(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_SCAN, CliScan(ApiScan(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_STATE, CliState(ApiState(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_SUMMARY, CliSummary(ApiSummary(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_SYSTEM_BACKEND, CliSystemBackend(ApiSystemBackend(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_SYSTEM_IN_PROGRESS, CliSystemInProgress(ApiSystemInProgress(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_SYSTEM_HEARTBEAT, CliSystemHeartbeat(ApiSystemHeartbeat(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_SYSTEM_STATE, CliSystemState(ApiSystemState(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_SYSTEM_STATS, CliSystemStats(ApiSystemStats(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_SYSTEM_QUEUE_SIZE, CliSystemQueueSize(ApiSystemQueueSize(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_QUOTA, CliQuota(ApiQuota(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_GET_URL_HASH, CliUrlHash(ApiUrlHash(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_REANALYZE_SAMPLE, CliReanalyze(ApiReanalyze(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_SEARCH, CliSearch(ApiSearch(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_SUBMIT_DROPPED_FILE, CliDroppedFileSubmit(ApiDroppedFileSubmit(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_SUBMIT_FILE, CliSubmitFile(ApiSubmitFile(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_SUBMIT_URL_FILE, CliSubmitUrlFile(ApiSubmitFile(config['api_key'], config['api_secret'], config['server']))),
            (ACTION_SUBMIT_URL, CliSubmitUrl(ApiSubmitUrl(config['api_key'], config['api_secret'], config['server']))),
        ])

        request_session = requests.Session()

        # Validate the API key up front; everything below depends on its auth level.
        api_object_api_key_data = ApiApiKeyData(config['api_key'], config['api_secret'], config['server'])
        api_object_api_key_data.call(request_session, vxapi_cli_headers)
        api_key_data_json_response = api_object_api_key_data.get_response_json()

        if api_object_api_key_data.get_response_status_code() != 200 or api_key_data_json_response['response_code'] != 0:
            base_error_message = 'Can\'t retrieve data for given API Key \'{}\' in the webservice: \'{}\'. Response status code: \'{}\''.format(config['api_key'], config['server'], api_object_api_key_data.get_response_status_code())
            if 'response' in api_key_data_json_response and 'error' in api_key_data_json_response['response']:
                base_error_message += '. Response message: \'{}\''.format(api_key_data_json_response['response']['error'])

            raise RetrievingApiKeyDataError(base_error_message)

        used_api_key_data = api_key_data_json_response['response']
        parser = argparse.ArgumentParser(description=program_name, formatter_class=argparse.ArgumentDefaultsHelpFormatter, add_help=False)
        parser.add_argument('--version', '-ver', action='version', version='{} - version {}'.format(program_name, program_version))
        CliArgumentBuilder(parser).add_help_argument()

        # Only expose actions permitted by the key's auth level.
        subparsers = parser.add_subparsers(help='Action names for \'{}\' auth level'.format(used_api_key_data['auth_level_name']), dest="chosen_action")

        for name, cli_object in map_of_available_actions.items():
            if cli_object.api_object.endpoint_auth_level <= used_api_key_data['auth_level']:
                child_parser = subparsers.add_parser(name=name, help=cli_object.help_description, add_help=False)
                cli_object.add_parser_args(child_parser)

        args = vars(parser.parse_args())

        if args['chosen_action'] is not None:
            cli_object = map_of_available_actions[args['chosen_action']]
            cli_object.attach_args(args)
            if args['verbose'] is True:
                cli_object.init_verbose_mode()
                print(Color.control('Running \'{}\' in version \'{}\''.format(program_name, program_version)))

                if args['chosen_action'] != 'get_api_limits':
                    # API limits checking should be done here, to ensure that user always will be able to run command in help mode. Also there is no need to run it in non verbose mode.
                    api_object_api_limits = ApiApiLimits(config['api_key'], config['api_secret'], config['server'])
                    api_object_api_limits.call(request_session, vxapi_cli_headers)
                    api_limits_response_json = api_object_api_limits.get_response_json()

                    # Ignore when WebService doesn't have that endpoint
                    if api_object_api_limits.get_response_status_code() != 404:
                        if api_object_api_limits.get_response_status_code() != 200 or api_limits_response_json['response_code'] == -1:
                            raise RetrievingApiKeyDataError('Can\'t check API limits before calling requested endpoint in webservice: \'{}\'. Response status code: \'{}\''.format(config['server'], api_object_api_limits.get_response_status_code()))

                        if api_object_api_limits.get_response_status_code() == 200 and api_limits_response_json['response_code'] == 0 and api_limits_response_json['response']['limit_reached'] is True:
                            name_of_reached_limit = api_limits_response_json['response']['name_of_reached_limit']
                            raise ReachedApiLimitError('Exceeded maximum API requests per {}({}). Please try again later.'.format(name_of_reached_limit, api_limits_response_json['response']['used'][name_of_reached_limit]))

                    if api_object_api_limits.get_response_status_code() == 200 and api_limits_response_json['response_code'] == 0 and api_limits_response_json['response']['used']:
                        api_usage = OrderedDict()
                        api_usage_limits = api_limits_response_json['response']['limits']
                        is_api_limit_reached = False

                        for period, used_limit in api_limits_response_json['response']['used'].items():
                            # Given request is made after checking api limits. It means that we have to add 1 to current limits, to simulate that what happen after making requested API call
                            api_usage[period] = used_limit + 1
                            if is_api_limit_reached is False and api_usage[period] == api_usage_limits[period]:
                                is_api_limit_reached = True

                        print(Color.control('API Limits for used API Key'))
                        print('Webservice API usage limits: {}'.format(api_usage_limits))
                        print('Current API usage: {}'.format(json.dumps(api_usage)))
                        print('Is limit reached: {}'.format(Color.success('No') if is_api_limit_reached is False else Color.error('Yes')))

                print(Color.control('Used API Key'))
                print('API Key: {}'.format(used_api_key_data['api_key']))
                print('Auth Level: {}'.format(used_api_key_data['auth_level_name']))
                if used_api_key_data['user'] is not None:
                    print('User: {} ({})'.format(used_api_key_data['user']['name'], used_api_key_data['user']['email']))

                print(Color.control('Request was sent at ' + '{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())))
                print('Endpoint URL: {}'.format(cli_object.api_object.get_full_endpoint_url()))
                print('HTTP Method: {}'.format(cli_object.api_object.request_method_name.upper()))
                print('Sent GET params: {}'.format(cli_object.api_object.params))
                print('Sent POST params: {}'.format(cli_object.api_object.data))
                print('Sent files: {}'.format(cli_object.api_object.files))

            cli_object.api_object.call(request_session, vxapi_cli_headers)
            if args['verbose'] is True:
                print(Color.control('Received response at ' + '{:%Y-%m-%d %H:%M:%S}'.format(datetime.datetime.now())))
                print('Response status code: {}'.format(cli_object.get_colored_response_status_code()))
                print('Message: {}'.format(cli_object.get_colored_prepared_response_msg()))
                print(Color.control('Showing response'))

            print(cli_object.get_result_msg())
            cli_object.do_post_processing()

            if args['verbose'] is True:
                print('\n')
        else:
            print(Color.control('No option was selected. To check CLI options, run script in help mode: \'{} -h\''.format(__file__)))
    except Exception as exc:
        print(Color.control('During the code execution, error has occurred. Please try again or contact the support.'))
        print(Color.error('Message: \'{}\'.').format(str(exc)) + '\n')
        print(traceback.format_exc())
示例#50
0
    parser.add_argument('--use_senet',
                        help='use_senet',
                        default=False,
                        required=False,
                        type=bool)
    parser.add_argument('--se_reduction',
                        help='se_reduction',
                        default=16,
                        required=False,
                        type=int)

    return parser.parse_args()


if __name__ == '__main__':
    conf = get_config()
    # args = parse_args()
    # conf.eval.transform = trans.Compose([
    #     # trans.Resize(conf.model.input_size),
    #     trans.ToTensor(),
    #     trans.Normalize([0.5], [0.5])
    # ])

    # Evaluation sweep settings: CASIA-CeFA protocol 4@3, RGB modality only,
    # checkpoints from epochs 8 through 12 (this example is truncated here).
    # exp_sequence = ['4@2']
    exp_sequence = ['4@3']
    # format_sequence = ['rgb', 'depth', 'nir']
    format_sequence = ['rgb']
    epoch_range = range(8, 13)

    conf.data_path = Path(
        '/home/users/jiachen.xue/anti_spoofing/data/CASIA-CeFA/phase1')
示例#51
0
def validate_training_SVR(validation_generator, model):
    '''
    Compute the average Chamfer-distance validation loss while training
    PointsSVR.
    '''
    print("Validating model......")
    running_loss = 0
    batch_count = 0
    with torch.no_grad():
        # The generator yields (image, ground-truth point cloud, model
        # category, model name); only the first two feed the loss.
        for image, gt_points, _, _ in validation_generator:
            image = image.cuda()
            gt_points = gt_points.cuda()
            pred_points = model(image)  # predict a sparse point cloud
            batch_loss, _ = chamfer_distance(pred_points, gt_points)
            running_loss += batch_loss.item()
            batch_count += 1

    return running_loss / batch_count  # average validation loss


if __name__ == '__main__':
    from config import get_config

    experiment, opt = get_config()

    # Dispatch on the requested training mode; unknown modes are ignored,
    # matching the original if/elif behavior.
    trainers = {'AE': train_AE, 'SVR': train_SVR}
    if opt.train in trainers:
        trainers[opt.train](experiment, opt)
示例#52
0
def init(config_path=None, use_watchdog=False, use_forwarder=False, args=None):
    """Configure the server and the reporting thread.

    Returns a ``(reporter, server, config)`` tuple ready to be started.
    Exits the process with code 0 when dogstatsd is disabled in the config.
    """
    c = get_config(parse_args=False, cfg_path=config_path)

    if (not c['use_dogstatsd']
            and (args and args[0] in ['start', 'restart'] or not args)):
        log.info("Dogstatsd is disabled. Exiting")
        # We're exiting purposefully, so exit with zero (supervisor's expected
        # code). HACK: Sleep a little bit so supervisor thinks we've started cleanly
        # and thus can exit cleanly.
        sleep(4)
        sys.exit(0)

    log.debug("Configuring dogstatsd")

    port = c['dogstatsd_port']
    interval = DOGSTATSD_FLUSH_INTERVAL
    api_key = c['api_key']
    aggregator_interval = DOGSTATSD_AGGREGATOR_BUCKET_SIZE
    non_local_traffic = c['non_local_traffic']
    forward_to_host = c.get('statsd_forward_host')
    forward_to_port = c.get('statsd_forward_port')
    event_chunk_size = c.get('event_chunk_size')
    recent_point_threshold = c.get('recent_point_threshold', None)

    # When running behind the forwarder, metrics go to the local forwarder
    # target instead of straight to dd_url.
    target = c['dd_url']
    if use_forwarder:
        target = c['dogstatsd_target']

    hostname = get_hostname(c)

    # Create the aggregator (which is the point of communication between the
    # server and reporting threads).
    assert 0 < interval

    aggregator = MetricsBucketAggregator(
        hostname,
        aggregator_interval,
        recent_point_threshold=recent_point_threshold,
        formatter=get_formatter(c),
        histogram_aggregates=c.get('histogram_aggregates'),
        histogram_percentiles=c.get('histogram_percentiles'),
        utf8_decoding=c['utf8_decoding'])

    # Start the reporting thread.
    reporter = Reporter(interval, aggregator, target, api_key, use_watchdog,
                        event_chunk_size)

    # Start the server on an IPv4 stack
    # Default to loopback
    server_host = c['bind_host']
    # If specified, bind to all addressses
    if non_local_traffic:
        server_host = ''

    server = Server(aggregator,
                    server_host,
                    port,
                    forward_to_host=forward_to_host,
                    forward_to_port=forward_to_port)

    return reporter, server, c
示例#53
0
import config

singleDataRoot = 'simulation_data/single_neuron'

# Declare the per-figure CLI flags.
parser = flagparse.FlagParser()
for flag in ('--maxThetaFRSweeps',
             '--maxThetaFRSweeps_median',
             '--maxThetaFRSweeps_std',
             '--maxThetaFRHist',
             '--seizureProportion'):
    parser.add_flag(flag)
args = parser.parse_args()

#ps = ds.getDefaultParamSpaces()

env = NoiseEnvironment(user_config=config.get_config())

# Register each requested plotter (or everything when --all is set),
# preserving the original registration order.
_PLOTTER_TABLE = (
    (args.maxThetaFRSweeps, noisefigs.plotters.MaxMeanThetaFRSweepPlotter),
    (args.seizureProportion, noisefigs.plotters.PSeizureSweepPlotter),
    (args.maxThetaFRSweeps_std, noisefigs.plotters.MaxStdThetaFRSweepPlotter),
    (args.maxThetaFRSweeps_median,
     noisefigs.plotters.MaxMedianThetaFRSweepPlotter),
    (args.maxThetaFRHist, noisefigs.plotters.MaxThetaFRHistPlotter),
)
for requested, plotter_cls in _PLOTTER_TABLE:
    if requested or args.all:
        env.register_plotter(plotter_cls)
示例#54
0
from datetime import datetime
from ranker import features
from ranker.estimators import Estimator, LONG_TERM_MODE, SHORT_TERM_MODE
from Queue import Queue
from threading import Thread
from multiprocessing import Pool, Process
import uuid
from models.wrapper import Dual_Encoder_Wrapper, Human_Imitator_Wrapper, HREDQA_Wrapper, CandidateQuestions_Wrapper, DumbQuestions_Wrapper, DRQA_Wrapper, NQG_Wrapper, Echo_Wrapper, Topic_Wrapper, FactGenerator_Wrapper, AliceBot_Wrapper
from models.wrapper import HRED_Wrapper
import logging
# Root logging format includes module, function, line, and PID for tracing
# across the model threads/processes below.
logging.basicConfig(
    level=logging.INFO,
    format=
    '%(asctime)s %(name)s.%(funcName)s +%(lineno)s: %(levelname)-8s [%(process)d] %(message)s',
)
conf = config.get_config()  # NOTE(review): `config` must be imported earlier in this file

# Script to select model conversation
# Initialize all the models here and then reply with the best answer
# ZMQ version. Here, all models are initialized in a separate thread
# Then the main process sends out next response and context as a PUB msg,
# which every model_client listens in SUB
# Then all models calculate the response and send it to parent using PUSH-PULL
# NN model selection algorithm can therefore work in the parent queue
# to calculate the score of all incoming msgs
# Set a hard time limit to discard the messages which are slow
# N.B. `import zmq` **has to be** the first import.

# spaCy 1.x-style flags: the parser and entity-recognizer pipelines are
# disabled here (presumably to cut load time) — confirm if NER is ever needed.
nlp = spacy.load('en', parser=False, entity=False)

# Utils
示例#55
0
from flask import Flask
# NOTE(review): the `flask.ext.*` namespace was removed in Flask 1.0; these
# imports only work against an old, pinned Flask version — confirm the pin.
from flask.ext.sqlalchemy import SQLAlchemy
from flask.ext.security import Security, SQLAlchemyUserDatastore
from flask_mail import Mail
from flask_boilerplate_utils import Boilerplate
# from flask_boilerplate_utils.menu import Menu
from flask.ext.menu import Menu
#  Initial App Setup
app = Flask(__name__)

# Configure the app.
import config
app.config_class = config.get_config()
app.config.from_object(app.config_class)

# Initialise the boilerplate and do Configuration Magic.
Boilerplate(app)
# Menu(app)
# Setup flask menu
Menu(app)

# Setup the ORM.
import models
app.db = SQLAlchemy(app)
# NOTE(review): `register_base` is not part of stock Flask-SQLAlchemy —
# presumably patched in by Boilerplate above; confirm before upgrading deps.
app.db.register_base(models.Base)

# Setup Flask-Security
app.user_datastore = SQLAlchemyUserDatastore(app.db, models.User, models.Role)
security = Security(app, app.user_datastore)
mail = Mail(app)
示例#56
0
def main():
    """Train the GNM model.

    Loads the experiment config, builds train/val/test dataloaders, then runs
    the optimisation loop.  At a frequency controlled by ``args.log.*`` it
    writes TensorBoard summaries (losses, per-term KLs, parameter/gradient
    histograms, reconstructions, bounding boxes, unconditional generations),
    evaluates an importance-weighted log-likelihood estimate on the val/test
    sets, and checkpoints the model.
    """
    # torch.autograd.set_detect_anomaly(True)

    torch.backends.cudnn.benchmark = True

    # os.environ['CUDA_LAUNCH_BLOCKING'] = '1'

    args = get_config()[0]

    # Seed every RNG source for reproducibility.
    torch.manual_seed(args.train.seed)
    torch.cuda.manual_seed(args.train.seed)
    torch.cuda.manual_seed_all(args.train.seed)
    np.random.seed(args.train.seed)

    model_dir = os.path.join(args.model_dir, args.exp_name)
    summary_dir = os.path.join(args.summary_dir, args.exp_name)

    if not os.path.isdir(model_dir):
        os.makedirs(model_dir)
    if not os.path.isdir(summary_dir):
        os.makedirs(summary_dir)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # torch.manual_seed(args.seed)
    args.train.num_gpu = torch.cuda.device_count()
    # Snapshot the fully-resolved config next to the summaries.
    with open(os.path.join(summary_dir, 'config.yaml'), 'w') as f:
        yaml.dump(args, f)
    if args.data.dataset == 'mnist':
        train_data = MultiMNIST(args, mode='train')
        test_data = MultiMNIST(args, mode='test')
        val_data = MultiMNIST(args, mode='val')
    elif args.data.dataset == 'blender':
        train_data = Blender(args, mode='train')
        test_data = Blender(args, mode='test')
        val_data = Blender(args, mode='val')
    else:
        # BUGFIX: the original did `raise NotImplemented`, which raises the
        # NotImplemented *constant* (not an exception class) and therefore
        # produces a confusing TypeError instead of the intended error.
        raise NotImplementedError(f'unknown dataset: {args.data.dataset}')

    train_loader = DataLoader(train_data,
                              batch_size=args.train.batch_size,
                              shuffle=True,
                              drop_last=True,
                              num_workers=6)
    num_train = len(train_data)

    test_loader = DataLoader(test_data,
                             batch_size=args.train.batch_size * 4,
                             shuffle=False,
                             drop_last=True,
                             num_workers=6)
    num_test = len(test_data)

    val_loader = DataLoader(val_data,
                            batch_size=args.train.batch_size * 4,
                            shuffle=False,
                            drop_last=True,
                            num_workers=6)
    num_val = len(val_data)

    model = GNM(args)
    model.to(device)
    num_gpu = 1
    if device.type == 'cuda' and torch.cuda.device_count() > 1:
        print("Let's use", torch.cuda.device_count(), "GPUs!")
        num_gpu = torch.cuda.device_count()
        model = nn.DataParallel(model)
    model.train()

    optimizer = torch.optim.RMSprop(model.parameters(), lr=args.train.lr)

    # Optionally resume from the last checkpoint.
    global_step = 0
    if args.last_ckpt:
        global_step, args.train.start_epoch = \
            load_ckpt(model, optimizer, args.last_ckpt, device)

    args.train.global_step = global_step
    args.log.phase_log = False

    writer = SummaryWriter(summary_dir)

    end_time = time.time()

    for epoch in range(int(args.train.start_epoch), args.train.epoch):

        local_count = 0
        last_count = 0
        for batch_idx, sample in enumerate(train_loader):

            imgs = sample.to(device)

            # Anneal schedule-dependent hyper-parameters (betas, tau, ...)
            # in place on `args` before the forward pass.
            hyperparam_anneal(args, global_step)

            global_step += 1

            phase_log = global_step % args.log.print_step_freq == 0 or global_step == 1
            args.train.global_step = global_step
            args.log.phase_log = phase_log

            pa_recon, log_like, kl, _, _, _, log = \
                model(imgs)

            aux_kl_pres, aux_kl_where, aux_kl_depth, aux_kl_what, aux_kl_bg, kl_pres, \
            kl_where, kl_depth, kl_what, kl_global_all, kl_bg = kl

            # Raw (un-weighted) per-batch means of each KL term.
            aux_kl_pres_raw = aux_kl_pres.mean(dim=0)
            aux_kl_where_raw = aux_kl_where.mean(dim=0)
            aux_kl_depth_raw = aux_kl_depth.mean(dim=0)
            aux_kl_what_raw = aux_kl_what.mean(dim=0)
            aux_kl_bg_raw = aux_kl_bg.mean(dim=0)
            kl_pres_raw = kl_pres.mean(dim=0)
            kl_where_raw = kl_where.mean(dim=0)
            kl_depth_raw = kl_depth.mean(dim=0)
            kl_what_raw = kl_what.mean(dim=0)
            kl_bg_raw = kl_bg.mean(dim=0)

            log_like = log_like.mean(dim=0)

            # Beta-weighted KL terms used in the training objective.
            aux_kl_pres = aux_kl_pres_raw * args.train.beta_aux_pres
            aux_kl_where = aux_kl_where_raw * args.train.beta_aux_where
            aux_kl_depth = aux_kl_depth_raw * args.train.beta_aux_depth
            aux_kl_what = aux_kl_what_raw * args.train.beta_aux_what
            aux_kl_bg = aux_kl_bg_raw * args.train.beta_aux_bg
            kl_pres = kl_pres_raw * args.train.beta_pres
            kl_where = kl_where_raw * args.train.beta_where
            kl_depth = kl_depth_raw * args.train.beta_depth
            kl_what = kl_what_raw * args.train.beta_what
            kl_bg = kl_bg_raw * args.train.beta_bg

            kl_global_raw = kl_global_all.sum(dim=-1).mean(dim=0)
            kl_global = kl_global_raw * args.train.beta_global

            # Negative (beta-weighted) ELBO.
            total_loss = -(log_like - kl_pres - kl_where - kl_depth - kl_what -
                           kl_bg - kl_global - aux_kl_pres - aux_kl_where -
                           aux_kl_depth - aux_kl_what - aux_kl_bg)

            optimizer.zero_grad()
            total_loss.backward()

            clip_grad_norm_(model.parameters(), args.train.cp)
            optimizer.step()

            local_count += imgs.data.shape[0]
            if phase_log:

                # ---------------- TensorBoard logging ----------------
                bs = imgs.size(0)

                time_inter = time.time() - end_time
                count_inter = local_count - last_count
                print_schedule(global_step, epoch, local_count, count_inter,
                               num_train, total_loss, time_inter)
                end_time = time.time()

                # Parameter and gradient histograms / statistics.
                for name, param in model.named_parameters():
                    writer.add_histogram('param/' + name,
                                         param.cpu().detach().numpy(),
                                         global_step)
                    if param.grad is not None:
                        writer.add_histogram('grad/' + name,
                                             param.grad.cpu().detach(),
                                             global_step)
                        if len(param.size()) != 1:
                            writer.add_scalar(
                                'grad_std/' + name + '.grad',
                                param.grad.cpu().detach().std().item(),
                                global_step)
                        writer.add_scalar(
                            'grad_mean/' + name + '.grad',
                            param.grad.cpu().detach().mean().item(),
                            global_step)

                # Histograms of intermediate values returned by the model.
                for key, value in log.items():
                    if value is None:
                        continue

                    if key == 'importance_map_full_res_norm':
                        writer.add_histogram(
                            'inside_value/' + key,
                            value[value > 0].cpu().detach().numpy(),
                            global_step)
                    else:
                        writer.add_histogram('inside_value/' + key,
                                             value.cpu().detach().numpy(),
                                             global_step)

                # Input images and reconstructions.
                grid_image = make_grid(
                    imgs.cpu().detach()[:args.log.num_summary_img].view(
                        -1, args.data.inp_channel, args.data.img_h,
                        args.data.img_w),
                    args.log.num_img_per_row,
                    normalize=False,
                    pad_value=1)
                writer.add_image('train/1-image', grid_image, global_step)

                grid_image = make_grid(pa_recon[0].cpu().detach()
                                       [:args.log.num_summary_img].clamp(
                                           0,
                                           1).view(-1, args.data.inp_channel,
                                                   args.data.img_h,
                                                   args.data.img_w),
                                       args.log.num_img_per_row,
                                       normalize=False,
                                       pad_value=1)
                writer.add_image('train/2-reconstruction_overall', grid_image,
                                 global_step)

                if args.arch.phase_background:
                    grid_image = make_grid(pa_recon[1].cpu().detach()
                                           [:args.log.num_summary_img].clamp(
                                               0,
                                               1).view(-1,
                                                       args.data.inp_channel,
                                                       args.data.img_h,
                                                       args.data.img_w),
                                           args.log.num_img_per_row,
                                           normalize=False,
                                           pad_value=1)
                    writer.add_image('train/3-reconstruction-fg', grid_image,
                                     global_step)

                    grid_image = make_grid(pa_recon[2].cpu().detach()
                                           [:args.log.num_summary_img].clamp(
                                               0,
                                               1).view(-1, 1, args.data.img_h,
                                                       args.data.img_w),
                                           args.log.num_img_per_row,
                                           normalize=False,
                                           pad_value=1)
                    writer.add_image('train/4-reconstruction-alpha',
                                     grid_image, global_step)

                    grid_image = make_grid(pa_recon[3].cpu().detach()
                                           [:args.log.num_summary_img].clamp(
                                               0,
                                               1).view(-1,
                                                       args.data.inp_channel,
                                                       args.data.img_h,
                                                       args.data.img_w),
                                           args.log.num_img_per_row,
                                           normalize=False,
                                           pad_value=1)
                    writer.add_image('train/5-reconstruction-bg', grid_image,
                                     global_step)

                # Bounding boxes overlaid on the inputs (all boxes, then
                # only the boxes whose presence variable is on).
                bbox = visualize(
                    imgs[:args.log.num_summary_img].cpu(),
                    log['z_pres'].view(
                        bs, args.arch.num_cell**2,
                        -1)[:args.log.num_summary_img].cpu().detach(),
                    log['z_where_scale'].view(
                        bs, args.arch.num_cell**2,
                        -1)[:args.log.num_summary_img].cpu().detach(),
                    log['z_where_shift'].view(
                        bs, args.arch.num_cell**2,
                        -1)[:args.log.num_summary_img].cpu().detach(),
                    only_bbox=True,
                    phase_only_display_pres=False)

                bbox = bbox.view(args.log.num_summary_img, -1, 3,
                                 args.data.img_h,
                                 args.data.img_w).sum(1).clamp(0.0, 1.0)
                bbox_img = imgs[:args.log.num_summary_img].cpu().expand(
                    -1, 3, -1, -1).contiguous()
                bbox_img[bbox.sum(dim=1, keepdim=True).expand(-1, 3, -1, -1) > 0.5] = \
                    bbox[bbox.sum(dim=1, keepdim=True).expand(-1, 3, -1, -1) > 0.5]
                grid_image = make_grid(bbox_img,
                                       args.log.num_img_per_row,
                                       normalize=False,
                                       pad_value=1)

                writer.add_image('train/6-bbox', grid_image, global_step)

                bbox_white = visualize(
                    imgs[:args.log.num_summary_img].cpu(),
                    log['z_pres'].view(
                        bs, args.arch.num_cell**2,
                        -1)[:args.log.num_summary_img].cpu().detach(),
                    log['z_where_scale'].view(
                        bs, args.arch.num_cell**2,
                        -1)[:args.log.num_summary_img].cpu().detach(),
                    log['z_where_shift'].view(
                        bs, args.arch.num_cell**2,
                        -1)[:args.log.num_summary_img].cpu().detach(),
                    only_bbox=True,
                    phase_only_display_pres=True)

                bbox_white = bbox_white.view(args.log.num_summary_img, -1, 3,
                                             args.data.img_h,
                                             args.data.img_w).sum(1).clamp(
                                                 0.0, 1.0)
                bbox_white_img = imgs[:args.log.num_summary_img].cpu().expand(
                    -1, 3, -1, -1).contiguous()
                bbox_white_img[bbox_white.sum(dim=1, keepdim=True).expand(-1, 3, -1, -1) > 0.5] = \
                    bbox_white[bbox_white.sum(dim=1, keepdim=True).expand(-1, 3, -1, -1) > 0.5]
                grid_image = make_grid(bbox_white_img,
                                       args.log.num_img_per_row,
                                       normalize=False,
                                       pad_value=1)

                writer.add_image('train/6a-bbox-white', grid_image,
                                 global_step)

                # Reconstructions conditioned on the global latent q(g).
                grid_image = make_grid(log['recon_from_q_g'].cpu().detach()
                                       [:args.log.num_summary_img].clamp(
                                           0,
                                           1).view(-1, args.data.inp_channel,
                                                   args.data.img_h,
                                                   args.data.img_w),
                                       args.log.num_img_per_row,
                                       normalize=False,
                                       pad_value=1)
                writer.add_image('train/7-reconstruction_from_q_g', grid_image,
                                 global_step)

                if args.arch.phase_background:
                    grid_image = make_grid(
                        log['recon_from_q_g_fg'].cpu().detach()
                        [:args.log.num_summary_img].clamp(0, 1).view(
                            -1, args.data.inp_channel, args.data.img_h,
                            args.data.img_w),
                        args.log.num_img_per_row,
                        normalize=False,
                        pad_value=1)
                    writer.add_image('train/8-recon_from_q_g-fg', grid_image,
                                     global_step)

                    grid_image = make_grid(
                        log['recon_from_q_g_alpha'].cpu().detach()
                        [:args.log.num_summary_img].clamp(0, 1).view(
                            -1, 1, args.data.img_h, args.data.img_w),
                        args.log.num_img_per_row,
                        normalize=False,
                        pad_value=1)
                    writer.add_image('train/9-recon_from_q_g-alpha',
                                     grid_image, global_step)

                    grid_image = make_grid(
                        log['recon_from_q_g_bg'].cpu().detach()
                        [:args.log.num_summary_img].clamp(0, 1).view(
                            -1, args.data.inp_channel, args.data.img_h,
                            args.data.img_w),
                        args.log.num_img_per_row,
                        normalize=False,
                        pad_value=1)
                    writer.add_image('train/a-background_from_q_g', grid_image,
                                     global_step)

                # Scalar summaries: weighted and raw loss/KL terms.
                writer.add_scalar('train/total_loss',
                                  total_loss.item(),
                                  global_step=global_step)
                writer.add_scalar('train/log_like',
                                  log_like.item(),
                                  global_step=global_step)
                writer.add_scalar('train/What_KL',
                                  kl_what.item(),
                                  global_step=global_step)
                writer.add_scalar('train/bg_KL',
                                  kl_bg.item(),
                                  global_step=global_step)
                writer.add_scalar('train/Where_KL',
                                  kl_where.item(),
                                  global_step=global_step)
                writer.add_scalar('train/Pres_KL',
                                  kl_pres.item(),
                                  global_step=global_step)
                writer.add_scalar('train/Depth_KL',
                                  kl_depth.item(),
                                  global_step=global_step)
                writer.add_scalar('train/kl_global',
                                  kl_global.item(),
                                  global_step=global_step)
                writer.add_scalar('train/What_KL_raw',
                                  kl_what_raw.item(),
                                  global_step=global_step)
                writer.add_scalar('train/bg_KL_raw',
                                  kl_bg_raw.item(),
                                  global_step=global_step)
                writer.add_scalar('train/Where_KL_raw',
                                  kl_where_raw.item(),
                                  global_step=global_step)
                writer.add_scalar('train/Pres_KL_raw',
                                  kl_pres_raw.item(),
                                  global_step=global_step)
                writer.add_scalar('train/Depth_KL_raw',
                                  kl_depth_raw.item(),
                                  global_step=global_step)

                writer.add_scalar('train/aux_What_KL',
                                  aux_kl_what.item(),
                                  global_step=global_step)
                writer.add_scalar('train/aux_bg_KL',
                                  aux_kl_bg.item(),
                                  global_step=global_step)
                writer.add_scalar('train/aux_Where_KL',
                                  aux_kl_where.item(),
                                  global_step=global_step)
                writer.add_scalar('train/aux_Pres_KL',
                                  aux_kl_pres.item(),
                                  global_step=global_step)
                writer.add_scalar('train/aux_Depth_KL',
                                  aux_kl_depth.item(),
                                  global_step=global_step)
                writer.add_scalar('train/aux_What_KL_raw',
                                  aux_kl_what_raw.item(),
                                  global_step=global_step)
                writer.add_scalar('train/aux_bg_KL_raw',
                                  aux_kl_bg_raw.item(),
                                  global_step=global_step)
                writer.add_scalar('train/aux_Where_KL_raw',
                                  aux_kl_where_raw.item(),
                                  global_step=global_step)
                writer.add_scalar('train/aux_Pres_KL_raw',
                                  aux_kl_pres_raw.item(),
                                  global_step=global_step)
                writer.add_scalar('train/aux_Depth_KL_raw',
                                  aux_kl_depth_raw.item(),
                                  global_step=global_step)

                writer.add_scalar('train/kl_global_raw',
                                  kl_global_raw.item(),
                                  global_step=global_step)
                writer.add_scalar('train/tau_pres',
                                  args.train.tau_pres,
                                  global_step=global_step)
                for i in range(args.arch.draw_step):
                    writer.add_scalar(f'train/kl_global_raw_step_{i}',
                                      kl_global_all[:, i].mean().item(),
                                      global_step=global_step)

                writer.add_scalar('train/log_prob_x_given_g',
                                  log['log_prob_x_given_g'].mean(0).item(),
                                  global_step=global_step)

                # Un-weighted ELBO (raw KLs) for monitoring.
                elbo = (log_like.item() - kl_pres_raw.item() -
                        kl_where_raw.item() - kl_depth_raw.item() -
                        kl_what_raw.item() - kl_bg_raw.item() -
                        kl_global_raw.item())

                writer.add_scalar('train/elbo', elbo, global_step=global_step)

                ######################################## generation ########################################

                with torch.no_grad():
                    model.eval()
                    if num_gpu > 1:
                        sample = model.module.sample()[0]
                    else:
                        sample = model.sample()[0]
                    model.train()

                grid_image = make_grid(sample[0].cpu().detach().clamp(0, 1),
                                       args.log.num_img_per_row,
                                       normalize=False,
                                       pad_value=1)
                writer.add_image('generation/1-image', grid_image, global_step)

                if args.arch.phase_background:
                    grid_image = make_grid(sample[1].cpu().detach().clamp(
                        0, 1),
                                           args.log.num_img_per_row,
                                           normalize=False,
                                           pad_value=1)
                    writer.add_image('generation/2-fg', grid_image,
                                     global_step)

                    grid_image = make_grid(sample[2].cpu().detach().clamp(
                        0, 1),
                                           args.log.num_img_per_row,
                                           normalize=False,
                                           pad_value=1)
                    writer.add_image('generation/3-alpha', grid_image,
                                     global_step)

                    grid_image = make_grid(sample[3].cpu().detach().clamp(
                        0, 1),
                                           args.log.num_img_per_row,
                                           normalize=False,
                                           pad_value=1)
                    writer.add_image('generation/4-bg', grid_image,
                                     global_step)

                ###################################### generation end ######################################

                last_count = local_count

        ###################################### ll computing ######################################
        # only for logging, final ll should be computed using 100 particles

        if epoch % args.log.compute_nll_freq == 0:

            print(f'val nll at the end of epoch {epoch}')

            model.eval()
            args.log.phase_nll = True

            elbo_list = []
            kl_list = []
            ll_list = []
            with torch.no_grad():
                args.log.phase_log = False
                for batch_idx, sample in enumerate(val_loader):
                    imgs = sample.to(device)

                    # Importance-weighted LL estimate over nll_num_sample
                    # samples; ELBO/KL use only the first sample.
                    ll_sample_list = []
                    for i in range(args.log.nll_num_sample):
                        _, log_like, kl, log_imp, _, _, _ = \
                            model(imgs)
                        aux_kl_pres, aux_kl_where, aux_kl_depth, aux_kl_what, \
                        aux_kl_bg, kl_pres, kl_where, kl_depth, kl_what, \
                        kl_global_all, kl_bg = kl

                        log_imp_pres, log_imp_depth, log_imp_what, log_imp_where, log_imp_bg, log_imp_g = log_imp

                        ll_sample_list.append(
                            (log_like + log_imp_pres + log_imp_depth +
                             log_imp_what + log_imp_where + log_imp_bg +
                             log_imp_g).cpu())
                        # Only use one sample for elbo
                        if i == 0:
                            elbo_list.append((log_like - kl_pres - kl_where -
                                              kl_depth - kl_what - kl_bg -
                                              kl_global_all.sum(dim=1)).cpu())
                            kl_list.append(
                                (kl_pres + kl_where + kl_depth + kl_what +
                                 kl_bg + kl_global_all.sum(dim=1)).cpu())
                    ll_sample = log_mean_exp(torch.stack(ll_sample_list,
                                                         dim=1),
                                             dim=1)
                    ll_list.append(ll_sample)

                ll_all = torch.cat(ll_list, dim=0)
                elbo_all = torch.cat(elbo_list, dim=0)
                kl_all = torch.cat(kl_list, dim=0)

            writer.add_scalar('val/ll',
                              ll_all.mean(0).item(),
                              global_step=epoch)
            writer.add_scalar('val/elbo',
                              elbo_all.mean(0).item(),
                              global_step=epoch)
            writer.add_scalar('val/kl',
                              kl_all.mean(0).item(),
                              global_step=epoch)

            args.log.phase_nll = False
            model.train()

        # Same evaluation on the test set, 10x less frequently.
        if epoch % (args.log.compute_nll_freq * 10) == 0:

            print(f'test nll at the end of epoch {epoch}')

            model.eval()
            args.log.phase_nll = True

            elbo_list = []
            kl_list = []
            ll_list = []
            with torch.no_grad():
                args.log.phase_log = False
                for batch_idx, sample in enumerate(test_loader):
                    imgs = sample.to(device)

                    ll_sample_list = []
                    for i in range(args.log.nll_num_sample):
                        _, log_like, kl, log_imp, _, _, _ = \
                            model(imgs)
                        aux_kl_pres, aux_kl_where, aux_kl_depth, aux_kl_what, \
                        aux_kl_bg, kl_pres, kl_where, kl_depth, kl_what, \
                        kl_global_all, kl_bg = kl

                        log_imp_pres, log_imp_depth, log_imp_what, log_imp_where, log_imp_bg, log_imp_g = log_imp

                        ll_sample_list.append(
                            (log_like + log_imp_pres + log_imp_depth +
                             log_imp_what + log_imp_where + log_imp_bg +
                             log_imp_g).cpu())
                        # Only use one sample for elbo
                        if i == 0:
                            elbo_list.append((log_like - kl_pres - kl_where -
                                              kl_depth - kl_what - kl_bg -
                                              kl_global_all.sum(dim=1)).cpu())
                            kl_list.append(
                                (kl_pres + kl_where + kl_depth + kl_what +
                                 kl_bg + kl_global_all.sum(dim=1)).cpu())
                    ll_sample = log_mean_exp(torch.stack(ll_sample_list,
                                                         dim=1),
                                             dim=1)
                    ll_list.append(ll_sample)

                ll_all = torch.cat(ll_list, dim=0)
                elbo_all = torch.cat(elbo_list, dim=0)
                kl_all = torch.cat(kl_list, dim=0)

            writer.add_scalar('test/ll',
                              ll_all.mean(0).item(),
                              global_step=epoch)
            writer.add_scalar('test/elbo',
                              elbo_all.mean(0).item(),
                              global_step=epoch)
            writer.add_scalar('test/kl',
                              kl_all.mean(0).item(),
                              global_step=epoch)

            args.log.phase_nll = False
            model.train()

        if epoch % args.log.save_epoch_freq == 0 and epoch != 0:
            save_ckpt(model_dir, model, optimizer, global_step, epoch,
                      local_count, args.train.batch_size, num_train)

    save_ckpt(model_dir, model, optimizer, global_step, epoch, local_count,
              args.train.batch_size, num_train)
    # Flush and close the TensorBoard writer so the final events are
    # persisted (the original leaked the writer).
    writer.close()
            plt.subplot(3, 2, 3)
            plt.imshow(proposal_img[:,:,::-1].astype(np.uint8))
            plt.axis('off')

            plt.subplot(3, 2, 4)
            plt.imshow(proposal_label.astype(np.uint8))
            plt.axis('off')

            plt.subplot(3, 2, 5)
            plt.imshow(clamp_array(proposal_msk*255, 0, 255).astype(np.uint8))
            plt.axis('off')

            fig.savefig(out_path, bbox_inches='tight')
            plt.close(fig)

        if cnt > 100:
            break


if __name__ == '__main__':
    config, unparsed = get_config()
    # Seed every RNG we rely on so dataloader tests are reproducible.
    for _seed in (np.random.seed, random.seed, torch.manual_seed):
        _seed(config.seed)
    if config.cuda:
        torch.cuda.manual_seed_all(config.seed)
    prepare_directories(config)

    test_cityscapes_syn_dataloader(config)
def exec_test(config_data):
    """Run the "mon and mon id status" API test and record its outcome.

    Parameters
    ----------
    config_data : dict
        Connection/auth settings, expanded as keyword arguments to APIMonOps.

    Any exception raised by the API calls is logged and reported as an
    "error" status rather than propagated.
    """
    add_test_info = AddTestInfo(8, "mon and mon id status")
    add_test_info.started_info()

    try:

        api_mon_ops = APIMonOps(**config_data)
        api_mon_ops.get_mon()
        api_mon_ops.get_mon_id()

        add_test_info.status("ok")

    # BUGFIX: the original used the Python-2-only `except Exception, e:`
    # syntax, which is a SyntaxError on Python 3; `as` works on both.
    except Exception as e:
        log.error("test error")
        log.error(e)
        add_test_info.status("error")

    add_test_info.completed_info()


if __name__ == "__main__":
    config_data = config.get_config()

    if not config_data["auth"]:
        log.error("auth failed")

    else:
        exec_test(config_data)
示例#59
0
    from src.image_watcher import ImagesWatcher
    from src.config import get_config
    from src.thread.watcher_thread import WatcherThread
    from src.thread.heartbeat_thread import HeartbeatThread
    from src.thread.worker_thread import WorkerThread
    from src.singleton import SingleInstance

# Constants
# TODO do this different for the dev vs release
# need to allow different configs so I can actually make sure release works
# NOTE(review): os.getenv('APPDATA') returns None when the variable is unset
# (e.g. non-Windows), which makes this concatenation raise TypeError — verify
# this only ever runs on Windows.
APP_DATA = os.getenv('APPDATA') + '\\PathologyWatchUploader'
CONFIG_FILE = f'{APP_DATA}\\config.json'
LOG_DIR = f'{APP_DATA}\\logs'

# Globals
# Parsed application configuration, loaded once at import time.
CONFIG = get_config(CONFIG_FILE)
# Thread-safe queue of pending uploads shared between the worker threads.
UPLOAD_QUEUE = queue.Queue()


def on_quit():
    """Terminate the Qt application event loop, ending the program."""
    QApplication.quit()

def on_settings(appctxt):
    """Return a zero-argument callback that opens the settings dialog.

    The application context is captured in the closure, so the returned
    callable can be wired directly to a menu action or button signal.
    """
    def _open_settings():
        dialog = SettingsDialog(appctxt, CONFIG, CONFIG_FILE)
        dialog.ui.exec_()

    return _open_settings

def train_with_perceptual_loss(mode, dataset, epochs, loss='l1Loss', op='momentum', lr=1e-2, batch_size=4,
          load_model=None, save_dir=None, source=False, start_index=0, with_grad=False,):
    """Train a depth-estimation model with a per-pixel loss plus a VGG19 perceptual loss.

    Args:
        mode, dataset, source: forwarded to get_model(); `dataset` also selects
            the valid-depth range ('Make3D' -> (0, 70), 'NyuV2' -> (0, 10)).
        epochs: number of passes over the training set.
        loss: key passed to get_loss() for the per-pixel criterion.
        op, lr: optimizer name and learning rate, forwarded to get_optim().
        batch_size: samples drawn (with replacement) per iteration.
        load_model: optional path to a state_dict checkpoint to resume from.
        save_dir: directory for per-epoch checkpoints; when None nothing is saved.
        start_index: offset added to the epoch number in checkpoint filenames.
        with_grad: accepted for interface compatibility; unused here.
    """
    device = torch.device('cuda:0')

    # Build the model and optionally resume from a checkpoint.
    model = get_model(mode, dataset, source)
    model.to(device)
    if load_model is not None:
        model.load_state_dict(torch.load(load_model))
    model.train()

    loss_fn = get_loss(loss)
    loss_fn.to(device)
    optim = get_optim(model, op, lr)

    train_count = int(get_config(dataset, 'train_count'))
    total_it = math.ceil(train_count * epochs / batch_size)
    epoch = train_count // batch_size  # iterations per epoch
    LAMBDA = 1  # weight of the perceptual term in the total loss

    # Frozen VGG19 feature extractor driving the perceptual loss.
    cnn = torchvision.models.vgg19(pretrained=True).features.to(device).eval()

    # Hoisted out of the loop: the original re-fetched this every iteration
    # even though it is loop-invariant.
    perceptual_loss_fn = get_loss('perceptualLoss')

    if save_dir is not None and not os.path.exists(save_dir):
        # makedirs(+exist_ok) also creates missing parents; os.mkdir could not.
        os.makedirs(save_dir, exist_ok=True)

    for i in range(total_it):
        # Random sample (with replacement) of training indices for this batch.
        batch_list = list(np.random.randint(1, train_count, size=[batch_size]))
        images, depths = get_train_data(dataset, batch_list)
        images = torch.from_numpy(images).cuda().float()
        depths = torch.from_numpy(depths).cuda()

        optim.zero_grad()
        predict = model(images)

        # BUG FIX: the original compared strings with `is`, which relies on
        # CPython interning and is not guaranteed to be True for equal strings.
        if dataset == 'Make3D':
            mask = (depths > 0.0) & (depths < 70.0)
        elif dataset == 'NyuV2':
            mask = (depths > 0.0) & (depths < 10.0)
        else:
            # BUG FIX: the original fell back to a raw copy of `depths` as the
            # mask (depth values used as weights) — keep every pixel instead.
            mask = torch.ones_like(depths, dtype=torch.bool)

        # Per-pixel loss on the unnormalized depths.
        pixel_loss = loss_fn(predict, depths, mask)

        # Normalize depths/predictions to [0, 1] before feeding them to VGG.
        max_depth = 70. if dataset == 'Make3D' else 10. if dataset == 'NyuV2' else None
        if max_depth is not None:
            depths = depths.clamp_(0., max_depth) / max_depth
            predict = predict.clamp_(0., max_depth) / max_depth

        # Build the perceptual (content + style) loss graph on this batch,
        # using the ground-truth depths as both content and style targets.
        content = torch.stack([depths, depths, depths], dim=1)
        perceptual_model, content_losses, style_losses = perceptual_loss_fn(cnn, content, content)

        input_perceptual = torch.stack([predict, predict, predict], dim=1)
        perceptual_model(input_perceptual)

        style_score = sum(sl.loss for sl in style_losses)
        content_score = sum(cl.loss for cl in content_losses)
        perceptual_loss = content_score + style_score

        total_loss = pixel_loss + LAMBDA * perceptual_loss
        total_loss.backward()
        optim.step()

        if i % 100 == 0:
            message = 'Epoch [{}/{}]: iter {}: per-pixel loss is {}, features loss is {}'.format(
                i // epoch, epochs, i, pixel_loss.detach().cpu().item(),
                perceptual_loss.detach().cpu().item())
            print(message)

        # BUG FIX: only save when a directory was supplied — the original
        # formatted `None` into the path ('None/0.pkl') and crashed at the
        # first epoch boundary whenever save_dir was omitted.
        if save_dir is not None and i % epoch == epoch - 1:
            torch.save(model.state_dict(), '{}/{}.pkl'.format(save_dir, start_index + i // epoch))