Example #1
0
def show():
    """Render the home page with the current thermostat readings and settings."""
    page_context = {
        'actual_temp': temp_controller.get_temp_string(),
        'thermostat_status': tools.get_config('thermostat_status'),
        'goal_temp': tools.get_config('goal_temp'),
    }
    template_data = {'content': tools.render_page('home', page_context)}
    return render_template('main.html', **template_data)
Example #2
0
def move_lid(box_status):
    """Move the box lid to the opposite of its current state.

    box_status: (string) "closed" or "open". The configured time for that
    status is looked up and, when it is a valid date, the lid is moved.
    Any other status is ignored.
    """
    try:
        # BUGFIX: the config lookup is now inside the try block — the
        # original fetched it outside, so the handler's "could not get
        # open or close time" message could only fire via the NameError
        # from an unknown status, which a bare except silently caught.
        if box_status == "closed":
            open_close_time = tools.get_config('open_time')
        elif box_status == "open":
            open_close_time = tools.get_config('close_time')
        else:
            return  # unknown status: nothing sensible to do
        if is_valid_date(open_close_time):
            print("moving box")
            if box_status == "open":
                box.close_box()
            else:
                box.open_box()
    except Exception:  # narrowed from a bare except
        print('could not get open or close time, is setting.json still valid?')
Example #3
0
def show():
    """Render the wifi-settings page."""
    # Scan for SSIDs once; the original also re-scanned inside the dict
    # literal, leaving this local unused.
    ssid_list = wifi_client.ssid_list()
    # Call alert() once and unpack it; the original called it twice.
    alert_type, alert_message = alert()
    template_data = {
        'content':
        tools.render_page(
            "wifi-settings", {
                'wifi_ssid': tools.get_config('wifi_ssid'),
                'wifi_wpa2': tools.get_config('wifi_wpa2'),
                "alert": alert_message,
                "alert_type": alert_type,
                "ssid_list": ssid_list
            })
    }
    return render_template('main.html', **template_data)
Example #4
0
def show():
    """Render the open/close settings page."""
    # BUGFIX: removed the dead "open(...).read()" line — it loaded the
    # template into an unused variable and leaked the file handle
    # (tools.render_page is what actually renders the page).
    # Call alert() once and unpack it; the original called it twice.
    alert_type, alert_message = alert()
    template_data = {
        'content': tools.render_page("open-close-settings",
                                     {"open_time": tools.get_config('open_time'),
                                      "close_time": tools.get_config("close_time"),
                                      "box_status": tools.get_config("box_status"),
                                      "alert": alert_message,
                                      "alert_type": alert_type,
                                      "mode": tools.get_config("mode")
                                      })
    }
    return render_template('main.html', **template_data)
Example #5
0
def open():
    """Open the thermostat vent if it is currently closed.

    Returns a user-facing status message.

    NOTE(review): this function shadows the builtin open(); renaming it
    would change the public interface, so the name is kept.
    """
    if tools.get_config('thermostat_status') == 'closed':
        motor_controller.turn(1, 0.35, 1)
        tools.write_config('thermostat_status', 'open')
        # BUGFIX: corrected the typo "Opend" in the user-facing message.
        return 'Opened thermostat'
    else:
        return 'thermostat already open'
Example #6
0
def set_times():
    """Persist the requested open/close times, but only while the box is open."""
    if tools.get_config("box_status") != "open":
        return "The box is closed! You can not change these settings now"
    tools.write_config("open_time", request.args.get('open'))
    tools.write_config("close_time", request.args.get('close'))
    return "Open/closing times are set!"
Example #7
0
def close():
    """Close the thermostat vent if it is currently open.

    Returns a user-facing status message.
    """
    if tools.get_config('thermostat_status') != 'open':
        return 'thermostat already closed'
    motor_controller.turn(-1, 0.35, 1)
    tools.write_config('thermostat_status', 'closed')
    return 'closed thermostat'
Example #8
0
def authorise(scopes=None):
    """Prompt for a Spotify user token and build an authorised client.

    scopes: optional OAuth scopes forwarded to util.prompt_for_user_token.

    Reads clientid/clientsecret/redirecturi from the tool config, takes the
    user name and playlist choice from sys.argv, and returns a tuple
    (sp, username, choose_playlist) where sp is a spotipy.Spotify client,
    or None when no token could be obtained.
    """
    config = tools.get_config()
    client_id = config['clientid']
    client_secret = config['clientsecret']
    redirect = config['redirecturi']

    # NOTE(review): requires at least three argv entries (script, username,
    # playlist), but the usage message below only shows a username —
    # confirm whether the playlist argument is really mandatory.
    if len(sys.argv) > 2:
        username = sys.argv[1]
        choose_playlist = sys.argv[2]
        print("")
        print("Searching playlists for user {}".format(sys.argv[1]))
    else:
        print("To run, enter Spotify username, e.g. {} \"Joe Bloggs\"".format(
            sys.argv[0]))
        sys.exit()

    token = util.prompt_for_user_token(username,
                                       scopes,
                                       client_id=client_id,
                                       client_secret=client_secret,
                                       redirect_uri=redirect)
    print("")
    if token:
        sp = spotipy.Spotify(auth=token)
        print("successfully retrieved token for user {}".format(username))

    else:
        print("Can't get token for {}\n\n".format(username))
        sp = None
    return sp, username, choose_playlist
Example #9
0
class DmpSpider(CrawlSpider):
    """Crawl qxs.la chapter pages for the configured fiction and yield DmpItems."""

    name = 'dmp'
    allowed_domains = ['qxs.la']

    config = get_config()
    url = config.get('fiction').get('url')
    # The second-to-last path segment of the fiction URL is its numeric id.
    book_id = url.split('/')[-2]
    start_urls = [url]

    rules = (
        # Rule(LinkExtractor(allow=r'/225289/\d+'), callback='parse_item', follow=True),
        Rule(LinkExtractor(allow=rf'/{book_id}/\d+'), callback='parse_item', follow=True),
    )

    def parse_item(self, response):
        """Build a DmpItem (index, title, url, content) from one chapter page."""
        index = self.get_index(response)
        title = self.get_title(response)
        url = self.get_url(response)
        content = self.get_content(response)

        item = DmpItem(index=index, title=title, url=url, content=content)
        return item

    def get_url(self, response):
        """Return the response URL, or "NAN" if it cannot be read."""
        url = "NAN"
        try:
            url = response.url
        # BUGFIX: narrowed from a bare except, which would also swallow
        # KeyboardInterrupt/SystemExit.
        except Exception:
            pass
        return url

    def get_index(self, response):
        """Return the chapter index parsed from the URL path, or -1 on failure."""
        index = -1
        try:
            index = int(str(response.url).split('/')[-2])
        except Exception:  # BUGFIX: narrowed from a bare except
            pass
        return index

    def get_title(self, response):
        """Return the page's <h1> text, or "NAN" on failure."""
        title = "NAN"
        try:
            title = response.xpath('//h1/text()').get()
        except Exception:  # BUGFIX: narrowed from a bare except
            pass
        return title

    def get_content(self, response):
        """Return the chapter text with site boilerplate stripped, or "NAN"."""
        content = "NAN"
        try:
            content = response.xpath('//div[@id="content"]').xpath('string(.)').get()
            # Strip the site's self-promotion line and inline ad calls.
            content = re.sub(r'全新的短域名 qxs.la 提供更快更稳定的访问,亲爱的读者们,赶紧把我记下来吧:qxs.la (全小说无弹窗)', '', content)
            content = re.sub(r'ad\d\(\);', '', content)
            content = '\t' + content.strip()
        except Exception:  # BUGFIX: narrowed from a bare except
            pass
        return content
Example #10
0
def main_start():
    """Main loop: grab the current quiz screenshot, OCR it and search for answers.

    Python 2 code (print statements, u'' literals).
    """
    # Get the phone screen size
    size = tools.check_os()
    print u'当前使用手机的尺寸为:' + str(size)

    # Load the configuration file
    config = tools.get_config()
    is_auto = config['auto']
    is_yd_ocr = config['yd_ocr']
    # is_debug = config['debug']
    my_str = config['my_str']

    # From the size and the config, work out the question area and blank area
    pixel_json = tools.get_pixel_config(size)
    blank_area = pixel_json['blank_area']
    # question_area = pixel_json['question_area']
    blank_area_point = blank_area['x1'], blank_area['y1'], blank_area[
        'x2'], blank_area['y2']
    # question_area_point = question_area['x1'], question_area['y1'], question_area['x2'], question_area['y2']

    # Loop, checking whether this is a question page
    question_num = 0
    crop_img_name = 'image/now.png'
    while True:
        # NOTE(review): "while False" never executes — the screen-detection
        # loop appears deliberately disabled (image/backup.png is used
        # below instead); confirm whether this is leftover debugging.
        while False:
            img = analyze.tell_and_get_image(is_auto, blank_area_point)
            if img is not None:
                question_num += 1
                break
            else:  # not a question page
                if not is_auto:
                    print('没有发现题目页面')
                    exit(-1)
                print('没有发现答题页面,继续')
                time.sleep(0.8)  # not a question page: sleep 0.8s, then re-check

        img = Image.open('image/backup.png')

        # Get the question and its options
        start = datetime.datetime.now()  # record the start time
        print u'开始时间为:' + str(start)
        tools.crop_image(img, crop_img_name)
        question, a1, a2, a3 = analyze.image_to_str(crop_img_name, is_yd_ocr,
                                                    my_str)  # image to text
        if question is None or question == '':
            print('\n没有识别题目')
            exit(-1)
        result_list = search.geturl(question, a1, a2, a3)  # search for answers
        for x in result_list:
            print x

        run_time = (datetime.datetime.now() - start).seconds
        print('本次运行时间为:{}秒'.format(run_time))
        if is_auto:
            # NOTE(review): original comment said "rest 10 seconds" after
            # each question, but the sleep is 25s — confirm intended value.
            time.sleep(25)
        else:
            break
Example #11
0
def check_temps():
    """Compare the measured temperature to the target and drive the vent motor."""
    status = tools.get_config('thermostat_status')
    target = tools.get_config('goal_temp')
    measured = temp_controller.get_temp_float()

    print(f'goal_temp: {target}')
    print(f'actual_temp: {measured}')

    # Close an open vent when warm enough; open a closed one when too cold.
    if measured >= target and status == 'open':
        print('closing')
        motor_controller.turn(-1, 0.35, 1)
        tools.write_config('thermostat_status', 'closed')
    elif measured < target and status == 'closed':
        print('opening')
        motor_controller.turn(1, 0.35, 1)
        tools.write_config('thermostat_status', 'open')
Example #12
0
def merge_chapters():
    """Concatenate all chapters of the configured fiction into one text file.

    Chapters are read from local MongoDB sorted by their numeric index and
    appended as "title\\r\\n\\r\\ncontent\\r\\n\\r\\n" records.
    """
    config = get_config()
    name = config.get('fiction').get('name')
    myclient = pymongo.MongoClient("mongodb://localhost:27017/")
    # NOTE(review): "fcitions" looks like a typo for "fictions", but it is
    # a runtime DB name that must match the writer side — kept as-is.
    mydb = myclient["fcitions"]
    mycol = mydb[name]

    # Open the output file once (with a context manager) instead of
    # re-opening it for every chapter as the original did.
    with open(f'./{name}.txt', 'a', encoding='utf-8') as f:
        for data in mycol.find().sort("index"):
            content = data.get('content')
            content = content.replace('  ', '\n\t')
            f.write(data.get('title') + '\r\n\r\n' + content + '\r\n\r\n')
    print('合成结束')
Example #13
0
    def google_geocode_service(self, address):
        """ Method to access the geocoding service by Google
            address: (string) Address for which latitude longitudes are required. Must be space separated

            Returns: None or GeoPoint with the latitude, longitude and full address
        """
        if self.google_api_key is None:
            logger.error('Are you sure google api credentials are set?')
            return None
        # Assemble the request URL piece by piece.
        address = "?address=" + address
        app_key = "&key=" + self.google_api_key
        request_url = _GOOGLE_BASE_URL + address + app_key
        api_response = self.request(url=request_url)
        if api_response is None:
            return None
        # Extract the status once and reuse it for both the check and the
        # error message (the original re-parsed the response on failure).
        status = get_config(api_response, self._GOOGLE_STATUS_CONFIG)
        if status == "OK":
            lat_long = get_config(api_response, self._GOOGLE_LAT_LONG_CONFIG)
            addr = get_config(api_response, self._GOOGLE_ADDR_CONFIG)
            pt = GeoPoint(latitude=lat_long['lat'], longitude=lat_long['lng'], address=addr)
            return pt
        logger.error('Bad request for google API, status received: ' + status)
        return None
Example #14
0
def check_times():
    """Switch the mode from the configured times and move the lid when a deadline passes."""
    # Automatic mode whenever either an open or a close time is configured.
    has_schedule = tools.config_is_set("open_time") or tools.config_is_set("close_time")
    tools.write_config('mode', 'automatic' if has_schedule else 'manual')

    box_status = tools.get_config("box_status")
    remaining = tools.time_left(box_status)
    days, hours, minutes, seconds = remaining[0], remaining[1], remaining[2], remaining[3]
    # Sum of the raw unit values, mirroring the original check.
    total = days + hours + minutes + seconds
    if total < 1 or days < 0:
        move_lid(box_status)
Example #15
0
    def here_geocode_service(self, address):
        """ Method to access the geocoding service by HERE
            address: (string) Address for which latitude longitudes are required. Must be space separated

            Returns: None or GeoPoint with the latitude, longitude and full address
        """
        if self.here_app_id is None or self.here_app_code is None:
            logger.error('Are you sure here app credentials are set?')
            return None
        # Assemble the request URL in one expression.
        request_url = (_HERE_BASE_URL
                       + "?app_id=" + self.here_app_id
                       + "&app_code=" + self.here_app_code
                       + "&searchtext=" + address)
        api_response = self.request(url=request_url)
        if api_response is None:
            return None
        # Guard clause: bail out early when the response carries no status.
        if not get_config(api_response, self._HERE_STATUS_CONFIG):
            logger.error('Bad request, no data received')
            return None
        lat_long = get_config(api_response, self._HERE_LAT_LONG_CONFIG)
        addr = get_config(api_response, self._HERE_ADDR_CONFIG)
        return GeoPoint(latitude=lat_long['Latitude'],
                        longitude=lat_long['Longitude'],
                        address=addr)
Example #16
0
def show():
    """Render the status page."""
    open_time = tools.get_config("open_time")
    close_time = tools.get_config("close_time")
    wifi_ssid = tools.get_config("wifi_ssid")
    mode = tools.get_config("mode")
    current_status = box_status()
    # BUGFIX: the original passed the box_status *function object* (not its
    # value) to tools.time_left; elsewhere in this file time_left receives
    # the status string, so call it here. Also dropped the unused
    # wifi_strength lookup.
    time_left_arr = tools.time_left(current_status)
    template_data = {
        'content':
        tools.render_page(
            "status", {
                'box_status': current_status,
                "future_box_status": future_box_status(),
                'open_time': open_time,
                "close_time": close_time,
                "time_left": time_left_arr,
                "wifi_ssid": wifi_ssid,
                "mode": mode
            })
    }

    return render_template('main.html', **template_data)
Example #17
0
    def _get_ftp_config(self):
        """ Save FTP connection parameters from ir.values to self,
        falling back to sane defaults for unset parameters, and validate
        the host and mode settings. """
        self._host = get_config(self._pool, self._cr, 'lx_host') or 'localhost'
        self._port = get_config(self._pool, self._cr, 'lx_port', int) or 21
        self._user = get_config(self._pool, self._cr, 'lx_user') or ''
        self._password = get_config(self._pool, self._cr, 'lx_password') or ''
        self._timeout = get_config(self._pool, self._cr, 'lx_timeout', int) or 10
        # BUGFIX: apply the default *before* .upper() — the original called
        # .upper() on the raw value and crashed with AttributeError when
        # the parameter was unset.
        self._mode = (get_config(self._pool, self._cr, 'lx_mode') or 'TEST').upper()
        # BUGFIX: "value or True" could never yield False; only fall back
        # to the default when the parameter is actually unset.
        passive = get_config(self._pool, self._cr, 'lx_passive', bool)
        self._passive = True if passive is None else passive

        message = _("Please check your LX1 configuration settings in LX1 Sync > Configuration > LX1 Configuration Settings for field '%s'")

        if self._mode not in ['PROD', 'TEST']:
            raise osv.except_osv(_('Config Error'), _('Please check your LX1 configuration settings in Settings -> Parameters -> System Parameters. Mode must be either "prod" or "test".'))
        if not self._host:
            raise osv.except_osv(_('Config Error'), message % 'host')
Example #18
0
  def warmup(self, fconfig, model, deterministic = True):
    """Load config, datasets and model state before training/evaluation.

    fconfig: path to a config file; the log file name is derived from it
             via fconfig[:-4] + 'log'
    model: model object providing load(config) and build(deterministic=...)
    deterministic: NOTE(review): this argument is ignored — build() is
                   called with the literal True below; confirm intent.
    """
    np.random.seed(2012310818)  # fixed seed for reproducibility
    config = get_config(fconfig)

    self.path = config['path']
    self.paraf = config['pfile']
    self.samples = config['samples']
    self.opt = config['opt']
    self.store = config['pstore']
    self.Arange = config['Arange']
    self.epochs = config['epochs']
    self.sliced = config['sliced']
    self.batchsize = config['batchsize']
    self.decay = config['decay']
    self.acc = config['acc']
    self.recoveru = config['update']
    self.logf = fconfig[:-4] + 'log'
    # NOTE(review): dat_pmask opens the same 'dat_patch_...' file as
    # dat_patch — this looks like it should be a 'dat_pmask_...' file;
    # confirm against the data-preparation step.
    self.dat_patch = h5.File(config['path'] + 'dat_patch_' + str(config['Arange']) + '.h5')
    self.dat_pmask = h5.File(config['path'] + 'dat_patch_' + str(config['Arange']) + '.h5')

    self.model = model
    self.model.load(config)
    self.model.build(deterministic = True)

    with open(config['path'] + 'parameter_' + str(config['Arange']) + '.json', 'r') as f:
      self.meta = json.load(f)

    # Pick the index-split file depending on whether the data is sliced.
    with open(config['path'] + ('unsliced.json' if self.sliced == False else 'sliced.json')) as f:
      self.dat_idx = json.load(f)

    self.train_idx = self.dat_idx['train']
    self.val_idx = self.dat_idx['val']
    self.test_idx = self.dat_idx['test']


    # opt is a 0/1 mask over the option names in sops (1-based indices).
    self.idx = [i + 1 for i, t in enumerate(self.opt) if t == 1]
    sops = ['segment', 'perspective', 'angle', 'mal']
    print(('%d training samples, %d validation samples, %d test samples...with option: '
      + ''.join([sops[i - 1] + ' ' for i in self.idx]))
       %  tuple([len(self.train_idx), len(self.val_idx), len(self.test_idx)]))
    self.tmp_dir = self.path + self.store 
    mkdir(self.tmp_dir)
Example #19
0
    def __init__(self, request_type, user_name, scope=None):
        """Authorise against Spotify and build the user for this request.

        request_type: key into scopes.actions used to pick a default scope
        user_name: Spotify user name to authorise as
        scope: optional explicit scope; when None it is looked up from
               scopes.actions (ValueError for an unknown request type)
        """
        self.scope = scope
        self.request_type = request_type
        self.token = None
        self.user_name = user_name
        self.auth = None
        self.user = None
        self.results = None

        # Pull API credentials from the tool configuration.
        cfg = tools.get_config()
        self.client_id = cfg['clientid']
        self.client_secret = cfg['clientsecret']
        self.redirect = cfg['redirecturi']

        if scope is None:
            if request_type not in scopes.actions:
                raise ValueError('request type not allowed')
            self.scope = scopes.actions[request_type]

        token = util.prompt_for_user_token(self.user_name,
                                           self.scope,
                                           client_id=self.client_id,
                                           client_secret=self.client_secret,
                                           redirect_uri=self.redirect)
        print("")
        if token:
            self.auth = spotipy.Spotify(auth=token)
            print("successfully retrieved token for user {}".format(
                self.user_name))
        else:
            print("Can't get token for {}\n\n".format(self.user_name))
            self.auth = None

        try:
            self.user = User(self.auth, name=self.user_name)
        except ValueError:
            print('check user name...')
Example #20
0
    def GET(self, map_name, format, *args, **kwargs):
        """Return the raw mapfile text when format == "map", otherwise a
        description dict with links to the map's sub-resources and OGC
        capabilities endpoints."""
        mf = get_mapfile(map_name)
        if format == "map":
            # BUGFIX: only read the mapfile when the raw contents are
            # actually requested (the original always read it), and close
            # the handle via a context manager.
            with open(mf.path, "r") as f:
                return f.read()
        # Hoist the repeated get_config("mapserver") lookups.
        ms_config = get_config("mapserver")
        home = web.ctx.home

        def caps(service, version_key):
            # One OGC GetCapabilities link (WMS/WFS/WCS).
            return href("%smap=%s&REQUEST=GetCapabilities&VERSION=%s&SERVICE=%s" % (
                ms_config["url"], mf.path, ms_config[version_key], service))

        return {"mapfile": {
            "name": map_name,
            "href": "%s/maps/%s.map" % (home, map_name),
            "workspaces": href("%s/maps/%s/workspaces.%s" % (home, map_name, format)),
            "layers": href("%s/maps/%s/layers.%s" % (home, map_name, format)),
            "layergroups": href("%s/maps/%s/layergroups.%s" % (home, map_name, format)),
            "styles": href("%s/maps/%s/styles.%s" % (home, map_name, format)),
            "wms_capabilities": caps("WMS", "wms_version"),
            "wfs_capabilities": caps("WFS", "wfs_version"),
            "wcs_capabilities": caps("WCS", "wcs_version"),
        }}
Example #21
0
    def _get_ftp_config(self):
        """ Save FTP connection parameters from ir.values to self,
        falling back to sane defaults for unset parameters, and validate
        the host and mode settings. """
        self._host = get_config(self._pool, self._cr, 'lx_host') or 'localhost'
        self._port = get_config(self._pool, self._cr, 'lx_port', int) or 21
        self._user = get_config(self._pool, self._cr, 'lx_user') or ''
        self._password = get_config(self._pool, self._cr, 'lx_password') or ''
        self._timeout = get_config(self._pool, self._cr, 'lx_timeout',
                                   int) or 10
        # BUGFIX: apply the default *before* .upper() — the original called
        # .upper() on the raw value and crashed with AttributeError when
        # the parameter was unset.
        self._mode = (get_config(self._pool, self._cr, 'lx_mode')
                      or 'TEST').upper()
        # BUGFIX: "value or True" could never yield False; only fall back
        # to the default when the parameter is actually unset.
        passive = get_config(self._pool, self._cr, 'lx_passive', bool)
        self._passive = True if passive is None else passive

        message = _(
            "Please check your LX1 configuration settings in LX1 Sync > Configuration > LX1 Configuration Settings for field '%s'"
        )

        if self._mode not in ['PROD', 'TEST']:
            raise osv.except_osv(
                _('Config Error'),
                _('Please check your LX1 configuration settings in Settings -> Parameters -> System Parameters. Mode must be either "prod" or "test".'
                  ))
        if not self._host:
            raise osv.except_osv(_('Config Error'), message % 'host')
Example #22
0
def alert():
    """Return (alert_type, message) for the settings pages.

    A "danger" alert is produced while the box is closed in automatic
    mode, since the settings are locked until the scheduled opening;
    otherwise both values are empty strings.
    """
    locked = (tools.get_config('box_status') == "closed"
              and tools.get_config('mode') == 'automatic')
    if not locked:
        return "", ""
    return "danger", "The box is closed, you can not change these settings until the box opens at the set time"
Example #23
0
                logging.info("test success")
                return 1
            else:
                logging.info("new_version = " + new_version)
                logging.info("this_version = " + this_version)
                return 0
        except Exception as e:
            logging.warning(e)
            return 0
    else:
        return 0


if __name__ == '__main__':
    with open('testconfig.ini', 'r', encoding='utf-8') as f:
        conf = tools.get_config(f.readlines())
    for c in conf.items():
        logging.info(c)
    num = int(conf.get("upgrade_times"))
    test_ip = conf.get("upgrade_ip")
    test_url = 'http://' + test_ip
    pw = conf.get("admin_pw")
    new_build = conf.get("new_build")
    new_version = conf.get("new_version")
    wait = int(conf.get("wait_time2"))
    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    version = '0.0.0.1'
    cmd1 = 'sed -i "s/DISTRIB_RELEASE=.*/DISTRIB_RELEASE=\\"'
    cmd2 = '\\"/g" /etc/openwrt_release'
    cmd = cmd1 + version + cmd2
Example #24
0
        time.sleep(2)
    else:
        return 0
    ssid = Page_script.get_24ssid(driver)
    if ssid == old_24ssid:
        logging.info('test success')
        return 1
    else:
        logging.warning("===test fail===")
        logging.warning('ssid= %s', ssid)
        logging.warning('oldssid= %s', old_24ssid)
        return 0

if __name__ == '__main__':
    op = open('testconfig.ini', 'r', encoding='utf-8')
    conf = tools.get_config(op.readlines())
    op.close()
    logging.info(conf)
    num = int(conf.get("reset_times"))
    test_ip = conf.get("reset_ip")
    test_url = 'http://'+test_ip
    pw = conf.get("admin_pw")
    new_ssid = conf.get("new_ssid")
    old_24ssid = conf.get("old_24ssid")
    wait_time = int(conf.get("wait_time1"))
    fail = 0
    for i in range(num):
        logging.info('====run test==== %s', i+1)
        chrome = webdriver.Chrome()
        if tools.ping_ok(test_ip) == 1:
            if dotest(chrome, test_url) == 1:
Example #25
0
class lx_file_outgoing(osv.osv):
    """
    A record is created each time a file is to be uploaded to LX1. It includes the XML of the uploaded file,
    a reference field pointing to the uploaded object and some time details 
    """

    _name = 'lx.file.outgoing'
    _rec_name = 'upload_file_name'
    _order = 'create_date DESC'

    # Models whose records may be pointed at by the 'record_id' reference
    # field below.
    _uploadable_models = [
        ('product.product', 'Product'),
        ('res.partner', 'Partner'),
        ('purchase.order', 'Purchase Order'),
        ('sale.order', 'sale Order'),
        ('stock.picking', 'Stock Picking'),
    ]

    # OpenERP 6/7 osv-style column definitions.
    _columns = {
        'create_date':
        fields.datetime('Create Date', readonly=True),
        'upload_date':
        fields.datetime('Upload Date', readonly=True),
        'state':
        fields.selection(
            (
                ('to_upload', 'To Upload'),
                ('uploaded', 'Uploaded'),
            ),
            'State',
            help=
            "The state represents this record's stage in the upload process",
            required=True),
        'failed':
        fields.boolean(
            'Failed',
            help="Indicates there was a problem while uploading the file",
            readonly=True),
        'xml':
        fields.text('XML',
                    required=True,
                    help="The XML that should be uploaded to LX1"),
        'content_type':
        fields.selection((('xml', 'XML'), ('pdf', 'PDF')),
                         'Content Type',
                         readonly=True),
        'result':
        fields.text(
            'Failure Message',
            help=
            "Any errors encountered during file upload will be listed here",
            readonly=True),
        'object_type':
        fields.char('Object Type',
                    size=64,
                    required=True,
                    help="The type of data contained in this file",
                    readonly=True),
        'upload_file_name':
        fields.char('Uploaded File Name',
                    size=64,
                    help="The name of the file to be uploaded",
                    readonly=True),
        'parent_file_id':
        fields.many2one(
            'lx.file.outgoing',
            'Parent File',
            help=
            "This file is an attachment, so the parent file is the file referencing this attachment",
            readonly=True),
        'attachment_file_ids':
        fields.one2many('lx.file.outgoing',
                        'parent_file_id',
                        'Attachments',
                        help="List of attachments uploaded with this file",
                        readonly=True),
        'record_id':
        fields.reference('Record To Upload',
                         _uploadable_models,
                         128,
                         readonly=True),
        'record_names':
        fields.text(
            'Records To Upload',
            help=
            "A list of all records that are contained in this file if more than one",
            readonly=True),
    }

    _defaults = {
        'content_type':
        'xml',
        'state':
        'to_upload',
        # Default upload name: "<company>_<sequence>.xml" built from the
        # configured company name and the lx.file.outgoing ir.sequence.
        'upload_file_name':
        lambda obj, cr, uid, context: '%s_%s.xml' %
        (get_config(obj.pool, cr, 'lx_company_name'),
         obj.pool.get('ir.sequence').get(cr, uid, 'lx.file.outgoing')),
    }

    def write(self, cr, uid, ids, vals, context=None):
        """ add update_date to vals automatically """
        # Stamp the upload time whenever a record moves to 'uploaded'
        # without an explicit upload_date.
        if vals.get('state') == 'uploaded' and 'upload_date' not in vals:
            vals['upload_date'] = datetime.now()
        return super(lx_file_outgoing, self).write(cr,
                                                   uid,
                                                   ids,
                                                   vals,
                                                   context=context)

    def unlink(self, cr, uid, ids, context=None):
        # Deletion is deliberately disabled to preserve the audit trail.
        raise osv.except_osv(
            _('Cannot Delete'),
            _('Deletion has been disabled for file outgoing records because it is important to maintain a complete audit trail'
              ))

    def upload(self, cr, uid, ids, context=None):
        """ Uploads file to ftp server then sets state to uploaded """
        lx_manager = self.pool.get('lx.manager')

        for file_outgoing in self.browse(cr, uid, ids):
            try:
                with lx_manager.connection(cr) as conn:
                    file_name = conn.upload_file_outgoing(
                        cr, uid, file_outgoing)
                    file_outgoing.write({'state': 'uploaded'})
                    # NOTE(review): returns inside the loop, so only the
                    # first id in 'ids' is ever uploaded — confirm intended.
                    return file_name
            except lx_manager.ftp_exceptions as e:
                # Python 2 code (note unicode() below).
                raise except_osv(_("Upload Problem"), \
                        _("".join(["There was a problem uploading the data to the LX1 servers.\n\n",
                                   "Please check your connection settings in ",
                                   "Setings > Parameters > System Parameters and make sure ",
                                   "your IP is in the LX1 FTP whitelist.\n\n",
                                   "%s""" % unicode(e)])))

    def delete_upload(self, cr, uid, ids, context=None):
        """ Deletes a file that has been uploaded """
        lx_manager = self.pool.get('lx.manager')

        for file_outgoing in self.browse(cr, uid, ids):
            # Nothing to delete if the file was never named/uploaded.
            if not file_outgoing.upload_file_name:
                continue

            with lx_manager.connection(cr) as conn:
                conn.delete_file_outgoing(cr, uid, file_outgoing)
                file_outgoing.write({'state': 'to_upload'})
Example #26
0
def future_box_status():
    """Return the box's next action: 'open' when it is closed, otherwise 'close'."""
    return 'open' if tools.get_config("box_status") == "closed" else "close"
Example #27
0
def main():
    """Entry point: parse CLI args, load config, then train or validate.

    With --val, runs a single evaluation pass over the test loader and
    reports GZSL scores. Otherwise runs the training loop for the selected
    schedule ('step1', 'mixed', 'st', 'st_mixed'), periodically snapshotting
    the model and re-evaluating it.
    """
    # NOTE(review): input validation via assert is stripped under `python -O`;
    # explicit raises would be more robust.
    args = parse_args()
    assert (os.path.exists(args.config))
    assert (args.schedule in ['step1', 'mixed', 'st', 'st_mixed'])
    assert ((args.multigpus == False and args.ngpu >= 0)
            or (args.multigpus == True and args.ngpu > 1))
    assert (not (args.val and args.resume_from > 0))
    config = get_config(args.config)
    # Validation requires an init model either from the config or the CLI.
    assert (not (args.val and config['init_model'] == 'none'
                 and args.init_model == 'none'))
    if args.init_model != 'none':
        assert (os.path.exists(args.init_model))
        config['init_model'] = args.init_model
    """
    Path to save results.
    """
    dataset_path = os.path.join(config['save_path'], config['dataset'])
    if not os.path.exists(dataset_path):
        os.makedirs(dataset_path)

    save_path = os.path.join(dataset_path, args.experimentid)
    if not os.path.exists(save_path) and not args.val:
        os.makedirs(save_path)

    # Model checkpoints, logs and the config copy each get a
    # schedule-specific location under save_path.
    if args.schedule == 'step1':
        model_path = os.path.join(save_path, 'models')
    elif args.schedule == 'mixed':
        model_path = os.path.join(save_path, 'models_transfer')
    elif args.schedule == 'st':
        model_path = os.path.join(save_path, 'models_st')
    else:
        model_path = os.path.join(save_path, 'models_st_transfer')
    if args.resume_from > 0:
        assert (os.path.exists(model_path))
    if not os.path.exists(model_path) and not args.val:
        os.makedirs(model_path)

    if args.schedule == 'step1':
        log_file = os.path.join(save_path, 'logs.txt')
    elif args.schedule == 'mixed':
        log_file = os.path.join(save_path, 'logs_transfer.txt')
    elif args.schedule == 'st':
        log_file = os.path.join(save_path, 'logs_st.txt')
    else:
        log_file = os.path.join(save_path, 'logs_st_transfer.txt')
    if args.val:
        log_file = os.path.join(dataset_path, 'logs_test.txt')
    logger = logWritter(log_file)

    if args.schedule == 'step1':
        config_path = os.path.join(save_path, 'configs.yaml')
    elif args.schedule == 'mixed':
        config_path = os.path.join(save_path, 'configs_transfer.yaml')
    elif args.schedule == 'st':
        config_path = os.path.join(save_path, 'configs_st.yaml')
    else:
        config_path = os.path.join(save_path, 'configs_st_transfer.yaml')
    """
    Start
    """
    if args.val:
        print("\n***Testing of model {0}***\n".format(config['init_model']))
        logger.write("\n***Testing of model {0}***\n".format(
            config['init_model']))
    else:
        print("\n***Training of model {0}***\n".format(args.experimentid))
        logger.write("\n***Training of model {0}***\n".format(
            args.experimentid))
    """
    Continue train or train from scratch
    """
    if args.resume_from >= 1:
        # Resuming: the previously saved config replaces the freshly loaded
        # one, after checking save_path/dataset still match.
        assert (args.val == False)
        if not os.path.exists(config_path):
            assert 0, "Old config not found."
        config_old = get_config(config_path)
        if config['save_path'] != config_old['save_path'] or config[
                'dataset'] != config_old['dataset']:
            assert 0, "New config does not coordinate with old config."
        config = config_old
        start_iter = args.resume_from
        print(
            "Continue training from Iter - [{0:0>6d}] ...".format(start_iter +
                                                                  1))
        logger.write(
            "Continue training from Iter - [{0:0>6d}] ...".format(start_iter +
                                                                  1))
    else:
        start_iter = 0
        if not args.val:
            # Preserve the exact config used for this run next to the results.
            shutil.copy(args.config, config_path)
            print("Train from scratch ...")
            logger.write("Train from scratch ...")
    """
    Modify config
    """
    # Propagate the backbone optimizer's lr / max-iter for the active
    # schedule into the backbone scheduler config.
    if args.schedule == 'step1':
        config['back_scheduler']['init_lr'] = config['back_opt']['lr']
    elif args.schedule == 'mixed':
        config['back_scheduler']['init_lr_transfer'] = config['back_opt'][
            'lr_transfer']
    elif args.schedule == 'st':
        config['back_scheduler']['init_lr_st'] = config['back_opt']['lr_st']
    else:
        config['back_scheduler']['init_lr_st_transfer'] = config['back_opt'][
            'lr_st_transfer']

    if args.schedule == 'step1':
        config['back_scheduler']['max_iter'] = config['ITER_MAX']
    elif args.schedule == 'mixed':
        config['back_scheduler']['max_iter_transfer'] = config[
            'ITER_MAX_TRANSFER']
    elif args.schedule == 'st':
        config['back_scheduler']['max_iter_st'] = config['ITER_MAX_ST']
    else:
        config['back_scheduler']['max_iter_st_transfer'] = config[
            'ITER_MAX_ST_TRANSFER']
    """
    Schedule method
    """
    s = "Schedule method: {0}".format(args.schedule)
    if args.schedule == 'mixed' or args.schedule == 'st_mixed':
        s += ", interval_step1={0}, interval_step2={1}".format(
            config['interval_step1'], config['interval_step2'])
    s += '\n'
    print(s)
    logger.write(s)
    """
    Use GPU
    """
    device = torch.device("cuda")
    if not args.multigpus:
        torch.cuda.set_device(args.ngpu)
    torch.backends.cudnn.benchmark = True
    """
    Get dataLoader
    """
    vals_cls, valu_cls, all_labels, visible_classes, visible_classes_test, train, val, sampler, visibility_mask, cls_map, cls_map_test = get_split(
        config)
    # out_dim_cls includes one extra (background/ignore) output.
    assert (visible_classes_test.shape[0] == config['dis']['out_dim_cls'] - 1)

    dataset = get_dataset(config['DATAMODE'])(
        train=train,
        test=None,
        root=config['ROOT'],
        split=config['SPLIT']['TRAIN'],
        base_size=513,
        crop_size=config['IMAGE']['SIZE']['TRAIN'],
        mean=(config['IMAGE']['MEAN']['B'], config['IMAGE']['MEAN']['G'],
              config['IMAGE']['MEAN']['R']),
        warp=config['WARP_IMAGE'],
        scale=(0.5, 1.5),
        flip=True,
        visibility_mask=visibility_mask)

    loader = torch.utils.data.DataLoader(
        dataset=dataset,
        batch_size=config['BATCH_SIZE']['TRAIN'],
        num_workers=config['NUM_WORKERS'],
        sampler=sampler)

    dataset_test = get_dataset(config['DATAMODE'])(
        train=None,
        test=val,
        root=config['ROOT'],
        split=config['SPLIT']['TEST'],
        base_size=513,
        crop_size=config['IMAGE']['SIZE']['TEST'],
        mean=(config['IMAGE']['MEAN']['B'], config['IMAGE']['MEAN']['G'],
              config['IMAGE']['MEAN']['R']),
        warp=config['WARP_IMAGE'],
        scale=None,
        flip=False)

    loader_test = torch.utils.data.DataLoader(
        dataset=dataset_test,
        batch_size=config['BATCH_SIZE']['TEST'],
        num_workers=config['NUM_WORKERS'],
        shuffle=False)
    """
    Load Class embedding
    """
    # Pad the visible-class embeddings with zero rows up to
    # ignore_index + 1 so labels can index the embedding table directly.
    class_emb = get_embedding(config)
    class_emb_vis = class_emb[visible_classes]
    class_emb_vis_ = torch.zeros(
        (config['ignore_index'] + 1 - class_emb_vis.shape[0],
         class_emb_vis.shape[1]),
        dtype=torch.float32)
    class_emb_vis_aug = torch.cat((class_emb_vis, class_emb_vis_), dim=0)
    class_emb_all = class_emb[visible_classes_test]
    """
    Get trainer
    """
    trainer = Trainer(
        cfg=config,
        class_emb_vis=class_emb_vis_aug,
        class_emb_all=class_emb_all,
        schedule=args.schedule,
        checkpoint_dir=model_path,  # for model loading in continued train
        resume_from=start_iter  # for model loading in continued train
    ).to(device)
    if args.multigpus:
        trainer.model = torch.nn.DataParallel(trainer.model,
                                              device_ids=range(args.ngpu))
    """
    Train/Val
    """
    if args.val:
        """
        Only do validation
        """
        loader_iter_test = iter(loader_test)
        targets, outputs = [], []

        # NOTE(review): the bare except is used to detect loader exhaustion
        # (StopIteration) but will also swallow real errors in the loader.
        while True:
            try:
                data_test, gt_test, image_id = next(
                    loader_iter_test
                )  # gt_test: torch.LongTensor with shape (N,H,W). elements: 0-19,255 in voc12
            except:
                break  # finish test

            data_test = torch.Tensor(data_test).to(device)

            with torch.no_grad():
                try:
                    test_res = trainer.test(data_test,
                                            gt_test,
                                            multigpus=args.multigpus)
                except MeaninglessError:
                    continue  # skip meaningless batch

                pred_cls_test = test_res['pred_cls_real'].cpu(
                )  # torch.LongTensor with shape (N,H',W'). elements: 0-20 in voc12
                resized_gt_test = test_res['resized_gt'].cpu(
                )  # torch.LongTensor with shape (N,H',W'). elements: 0-19,255 in voc12

                ##### gt mapping to target #####
                resized_target = cls_map_test[resized_gt_test]

            for o, t in zip(pred_cls_test.numpy(), resized_target):
                outputs.append(o)
                targets.append(t)

        score, class_iou = scores_gzsl(targets,
                                       outputs,
                                       n_class=len(visible_classes_test),
                                       seen_cls=cls_map_test[vals_cls],
                                       unseen_cls=cls_map_test[valu_cls])

        print("Test results:")
        logger.write("Test results:")

        for k, v in score.items():
            print(k + ': ' + json.dumps(v))
            logger.write(k + ': ' + json.dumps(v))

        score["Class IoU"] = {}
        for i in range(len(visible_classes_test)):
            score["Class IoU"][all_labels[
                visible_classes_test[i]]] = class_iou[i]
        print("Class IoU: " + json.dumps(score["Class IoU"]))
        logger.write("Class IoU: " + json.dumps(score["Class IoU"]))

        print("Test finished.\n\n")
        logger.write("Test finished.\n\n")

    else:
        """
        Training loop
        """
        if args.schedule == 'step1':
            ITER_MAX = config['ITER_MAX']
        elif args.schedule == 'mixed':
            ITER_MAX = config['ITER_MAX_TRANSFER']
        elif args.schedule == 'st':
            ITER_MAX = config['ITER_MAX_ST']
        else:
            ITER_MAX = config['ITER_MAX_ST_TRANSFER']
        assert (start_iter < ITER_MAX)

        # dealing with 'st_mixed' is the same as dealing with 'mixed'
        if args.schedule == 'st_mixed':
            args.schedule = 'mixed'
        assert (args.schedule in ['step1', 'mixed', 'st'])

        if args.schedule == 'step1':
            step_scheduler = Const_Scheduler(step_n='step1')
        elif args.schedule == 'mixed':
            step_scheduler = Step_Scheduler(config['interval_step1'],
                                            config['interval_step2'],
                                            config['first'])
        else:
            step_scheduler = Const_Scheduler(step_n='self_training')

        iteration = start_iter
        loader_iter = iter(loader)
        while True:
            # Log current learning rates and reset the running loss/accuracy
            # accumulators at the start and then every 1000 iterations.
            if iteration == start_iter or iteration % 1000 == 0:
                now_lr = trainer.get_lr()
                print("Now lr of dis: {0:.10f}".format(now_lr['dis_lr']))
                print("Now lr of gen: {0:.10f}".format(now_lr['gen_lr']))
                print("Now lr of back: {0:.10f}".format(now_lr['back_lr']))
                logger.write("Now lr of dis: {0:.10f}".format(
                    now_lr['dis_lr']))
                logger.write("Now lr of gen: {0:.10f}".format(
                    now_lr['gen_lr']))
                logger.write("Now lr of back: {0:.10f}".format(
                    now_lr['back_lr']))

                sum_loss_train = np.zeros(config['loss_count'],
                                          dtype=np.float64)
                sum_acc_real_train, sum_acc_fake_train = 0, 0
                temp_iter = 0

                sum_loss_train_transfer = 0
                sum_acc_fake_train_transfer = 0
                temp_iter_transfer = 0

            # mode should be constant 'step1' in non-zero-shot-learning
            # mode should be switched between 'step1' and 'step2' in zero-shot-learning
            mode = step_scheduler.now()
            assert (mode in ['step1', 'step2', 'self_training'])

            # Fetch a training batch, restarting the (epoch-ending) iterator
            # when it is exhausted.
            if mode == 'step1' or mode == 'self_training':
                try:
                    data, gt = next(loader_iter)
                except:
                    loader_iter = iter(loader)
                    data, gt = next(loader_iter)

                data = torch.Tensor(data).to(device)

            if mode == 'step1' or mode == 'step2':
                try:
                    loss = trainer.train(data,
                                         gt,
                                         mode=mode,
                                         multigpus=args.multigpus)
                except MeaninglessError:
                    print("Skipping meaningless batch...")
                    continue
            else:  # self training mode
                # Build pseudo ground truth from the model's own ranked
                # predictions, then train on it as if it were step1.
                try:
                    with torch.no_grad():
                        test_res = trainer.test(data,
                                                gt,
                                                multigpus=args.multigpus)
                        resized_gt_for_st = test_res['resized_gt'].cpu(
                        )  # torch.LongTensor with shape (N,H',W'). elements: 0-14,255 in voc12
                        sorted_indices = test_res['sorted_indices'].cpu(
                        )  # torch.LongTensor with shape (N,H',W',C)
                        gt_new = construct_gt_st(resized_gt_for_st,
                                                 sorted_indices, config)
                    loss = trainer.train(data,
                                         gt_new,
                                         mode='step1',
                                         multigpus=args.multigpus)
                except MeaninglessError:
                    print("Skipping meaningless batch...")
                    continue

            if mode == 'step1' or mode == 'self_training':
                loss_G_GAN = loss['loss_G_GAN']
                loss_G_Content = loss['loss_G_Content']
                loss_B_KLD = loss['loss_B_KLD']
                loss_D_real = loss['loss_D_real']
                loss_D_fake = loss['loss_D_fake']
                loss_D_gp = loss['loss_D_gp']
                loss_cls_real = loss['loss_cls_real']
                loss_cls_fake = loss['loss_cls_fake']
                acc_cls_real = loss['acc_cls_real']
                acc_cls_fake = loss['acc_cls_fake']

                sum_loss_train += np.array([
                    loss_G_GAN, loss_G_Content, loss_B_KLD, loss_D_real,
                    loss_D_fake, loss_D_gp, loss_cls_real, loss_cls_fake
                ]).astype(np.float64)
                sum_acc_real_train += acc_cls_real
                sum_acc_fake_train += acc_cls_fake
                temp_iter += 1

                # Accumulated averages since the last 1000-iteration reset.
                tal = sum_loss_train / temp_iter
                tsar = sum_acc_real_train / temp_iter
                tsaf = sum_acc_fake_train / temp_iter

                # display accumulated average loss and accuracy in step1
                if (iteration + 1) % config['display_interval'] == 0:
                    print("Iter - [{0:0>6d}] AAL: G_G-[{1:.4f}] G_C-[{2:.4f}] B_K-[{3:.4f}] D_r-[{4:.4f}] D_f-[{5:.4f}] D_gp-[{6:.4f}] cls_r-[{7:.4f}] cls_f-[{8:.4f}] Acc: cls_r-[{9:.4f}] cls_f-[{10:.4f}]".format(\
                            iteration + 1, tal[0], tal[1], tal[2], tal[3], tal[4], tal[5], tal[6], tal[7], tsar, tsaf))
                if (iteration + 1) % config['log_interval'] == 0:
                    logger.write("Iter - [{0:0>6d}] AAL: G_G-[{1:.4f}] G_C-[{2:.4f}] B_K-[{3:.4f}] D_r-[{4:.4f}] D_f-[{5:.4f}] D_gp-[{6:.4f}] cls_r-[{7:.4f}] cls_f-[{8:.4f}] Acc: cls_r-[{9:.4f}] cls_f-[{10:.4f}]".format(\
                                iteration + 1, tal[0], tal[1], tal[2], tal[3], tal[4], tal[5], tal[6], tal[7], tsar, tsaf))

            elif mode == 'step2':
                loss_cls_fake_transfer = loss['loss_cls_fake']
                acc_cls_fake_transfer = loss['acc_cls_fake']

                sum_loss_train_transfer += loss_cls_fake_transfer
                sum_acc_fake_train_transfer += acc_cls_fake_transfer
                temp_iter_transfer += 1

                talt = sum_loss_train_transfer / temp_iter_transfer
                tsaft = sum_acc_fake_train_transfer / temp_iter_transfer

                # display accumulated average loss and accuracy in step2 (transfer learning)
                if (iteration + 1) % config['display_interval'] == 0:
                    print("Iter - [{0:0>6d}] Transfer Learning: aal_cls_f-[{1:.4f}] acc_cls_f-[{2:.4f}]".format(\
                            iteration + 1, talt, tsaft))
                if (iteration + 1) % config['log_interval'] == 0:
                    logger.write("Iter - [{0:0>6d}] Transfer Learning: aal_cls_f-[{1:.4f}] acc_cls_f-[{2:.4f}]".format(\
                            iteration + 1, talt, tsaft))

            else:
                # NOTE(review): '%' applied to a '{}' template string raises
                # TypeError here; should presumably be .format(mode).
                raise NotImplementedError('Mode {} not implemented' % mode)

            # Save the temporary model
            if (iteration + 1) % config['snapshot'] == 0:
                trainer.save(model_path, iteration, args.multigpus)
                print(
                    "Temporary model of Iter - [{0:0>6d}] successfully stored.\n"
                    .format(iteration + 1))
                logger.write(
                    "Temporary model of Iter - [{0:0>6d}] successfully stored.\n"
                    .format(iteration + 1))

            # Test the saved model
            if (iteration + 1) % config['snapshot'] == 0:
                print(
                    "Testing model of Iter - [{0:0>6d}] ...".format(iteration +
                                                                    1))
                logger.write(
                    "Testing model of Iter - [{0:0>6d}] ...".format(iteration +
                                                                    1))

                loader_iter_test = iter(loader_test)
                targets, outputs = [], []

                # Same evaluation pass as the --val branch above.
                while True:
                    try:
                        data_test, gt_test, image_id = next(
                            loader_iter_test
                        )  # gt_test: torch.LongTensor with shape (N,H,W). elements: 0-19,255 in voc12
                    except:
                        break  # finish test

                    data_test = torch.Tensor(data_test).to(device)

                    with torch.no_grad():
                        try:
                            test_res = trainer.test(data_test,
                                                    gt_test,
                                                    multigpus=args.multigpus)
                        except MeaninglessError:
                            continue  # skip meaningless batch

                        pred_cls_test = test_res['pred_cls_real'].cpu(
                        )  # torch.LongTensor with shape (N,H',W'). elements: 0-20 in voc12
                        resized_gt_test = test_res['resized_gt'].cpu(
                        )  # torch.LongTensor with shape (N,H',W'). elements: 0-19,255 in voc12

                        ##### gt mapping to target #####
                        resized_target = cls_map_test[resized_gt_test]

                    for o, t in zip(pred_cls_test.numpy(), resized_target):
                        outputs.append(o)
                        targets.append(t)

                score, class_iou = scores_gzsl(
                    targets,
                    outputs,
                    n_class=len(visible_classes_test),
                    seen_cls=cls_map_test[vals_cls],
                    unseen_cls=cls_map_test[valu_cls])

                print("Test results:")
                logger.write("Test results:")

                for k, v in score.items():
                    print(k + ': ' + json.dumps(v))
                    logger.write(k + ': ' + json.dumps(v))

                score["Class IoU"] = {}
                for i in range(len(visible_classes_test)):
                    score["Class IoU"][all_labels[
                        visible_classes_test[i]]] = class_iou[i]
                print("Class IoU: " + json.dumps(score["Class IoU"]))
                logger.write("Class IoU: " + json.dumps(score["Class IoU"]))

                print("Test finished.\n")
                logger.write("Test finished.\n")

            step_scheduler.step()

            iteration += 1
            if iteration == ITER_MAX:
                break

        print("Train finished.\n\n")
        logger.write("Train finished.\n\n")
Example #28
0
def temp_min():
    """Lower the goal temperature by half a degree and report the new value."""
    new_goal = tools.get_config('goal_temp') - 0.5
    tools.write_config('goal_temp', new_goal)
    return f'new goal temp: {new_goal}'
Example #29
0
def close_box():
    """Close the box lid, unless automatic mode disallows manual control."""
    mode = tools.get_config('mode')
    if mode != 'automatic':
        box.close_box()
    else:
        print('Box in automatic mode, manual control disabled')
    return "Box close script called"
Example #30
0
# https://github.com/toggl/toggl_api_docs/blob/master/toggl_api.md
# Pushes locally imported time entries to the Toggl v8 time_entries API,
# printing a diagnostic line for every entry the API rejects.
from requests import post
from tools import (
    get_config, import_times, get_delta_datetime
)

config = get_config()
times = import_times()

api_url = "https://api.track.toggl.com/api/v8/time_entries"

for entry in times:
    r = post(
        api_url,
        auth=(config["TOGGL_API"]["key"], "api_token"),
        # Without a timeout a stalled connection would hang the script
        # forever; 30s is generous for a single small POST.
        timeout=30,
        json={
            "time_entry":
            {
                "description": entry['desc'],
                "created_with": "https://github.com/howzitcal/pyTogglTimes",
                "start": f"{entry['start']}T{entry['from']}:00{config['TOGGL_API']['timezone']}",
                "duration": get_delta_datetime(f"{entry['start']} {entry['from']}", f"{entry['start']} {entry['to']}"),
                "billable": entry["billable"],
                "pid": config['TOGGL_PROJECT_CODES'][entry["project"]],
                "wid": config["TOGGL_API"]["wid"]
            }
        }
    )
    if r.status_code != 200:
        print(
            f"[{r.text}] [{entry['start']} {entry['from']} -> {entry['to']}] [{entry['desc']}]")
Example #31
0
    def read_config(self):
        """Exercise get_config(); currently a stub.

        NOTE(review): assertEqual(2, 1) always fails — this looks like a
        placeholder assertion; confirm the intended expectation before
        relying on this test.
        """
        # create mock file

        config = get_config()
        self.assertEqual(2, 1)
Example #32
0
#!/usr/bin/env python
# -*- coding: utf-8 -*-

from tools import get_config

# Configuration loaded once at import time and shared module-wide.
GLOBAL_CONFIG = get_config()
Example #33
0
import logging

import mralogs
import mapfile

import webapp
from webapp import HTTPCompatible, urlmap, get_data

import tools
from tools import get_mapfile, get_mapfile_workspace, get_config, href, assert_is_empty

from pyxml import Entries


from extensions import plugins

# Configure logging from the "logging" section of the configuration.
# mralogs was previously used here without being imported, which would
# raise NameError as soon as the module was loaded.
mralogs.setup(get_config("logging")["level"], get_config("logging")["file"],
              get_config("logging")["format"])

class index(object):
    """Root resource of the MRA service."""

    def GET(self, format):
        """Return the service banner; *format* is accepted but ignored."""
        banner = "This is MRA."
        return banner

class mapfiles(object):
    @HTTPCompatible()
    def GET(self, format):
        mapfiles = []
        for path in tools.get_mapfile_paths():
            try:
                mf = mapfile.Mapfile(path)
            except IOError, OSError:
                continue
Example #34
0
def box_status():
    """Return the stored box status from the configuration.

    The value comes straight from the "box_status" config key
    (presumably "open"/"closed" — confirm against the writer).
    """
    return tools.get_config("box_status")