Example #1
def configure(name, rec):
    """Write <rec['keys']> to my authorized_keys file."""
    logger.verbose('create_lxc: configuring %s'%name)
    #new_keys = rec['keys']
    
    # get the unix account info
    gid = grp.getgrnam("slices")[2]
    pw_info = pwd.getpwnam(name)
    uid = pw_info[2]
    pw_dir = pw_info[5]

    # write out authorized_keys file and conditionally create
    # the .ssh subdir if need be.
    dot_ssh = os.path.join(pw_dir,'.ssh')
    if not os.path.isdir(dot_ssh):
        if not os.path.isdir(pw_dir):
            logger.verbose('create_lxc: WARNING: homedir %s does not exist for %s!'%(pw_dir,name))
            os.mkdir(pw_dir)
            os.chown(pw_dir, uid, gid)
        os.mkdir(dot_ssh)

    auth_keys = os.path.join(dot_ssh,'authorized_keys')

    # write all keys in one pass -- calling write_file once per key would
    # overwrite authorized_keys on every iteration, keeping only the last key
    tools.write_file(
        auth_keys,
        lambda f: f.write('\n'.join(key['key'] for key in rec['keys']) + '\n'))

    # set access permissions and ownership properly
    os.chmod(dot_ssh, 0o700)
    os.chown(dot_ssh, uid, gid)
    os.chmod(auth_keys, 0o600)
    os.chown(auth_keys, uid, gid)

    logger.log('create_lxc: %s: installed ssh keys' % name)
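
The examples on this page exercise several different tools.write_file helpers. The variant used here (and in Examples #4, #9 and #11) takes a filename plus a writer callback and, at some call sites, mode/uidgid keywords. The helper itself is not shown on this page; a minimal sketch consistent with those call sites might look like the following (the temp-file strategy and names are assumptions, not the original implementation):

import os
import tempfile

def write_file(filename, do_write, mode=None, uidgid=None):
    """Hypothetical sketch: write via a temp file in the same directory,
    then rename over the destination so readers never see a partial file."""
    fd, temp = tempfile.mkstemp(dir=os.path.dirname(filename) or '.')
    try:
        with os.fdopen(fd, 'w') as f:
            do_write(f)  # caller-supplied callback, e.g. lambda f: f.write(data)
        if mode is not None:
            os.chmod(temp, mode)
        if uidgid is not None:
            os.chown(temp, *uidgid)
        os.rename(temp, filename)  # atomic on POSIX when on the same filesystem
    except Exception:
        os.unlink(temp)
        raise

Writing through a temp file plus rename keeps a crash from leaving a half-written authorized_keys file behind.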
Example #3
    def open_sub_url(self, link, i):

        try:
            content = self.opener.open(link).read().decode('utf-8')
            match_reg = r'<dd class="job_request">.*?<span>(.*?)</span>.*?<ul class="position-label clearfix">(.*?)</ul>.*?<dd class="job_bt">\s+(.*?)\s+</dd>'
            result = re.findall(match_reg, content, re.S | re.M)
            if len(result) != 0:
                result = result[0]
                print(len(result))
                city = result[0].replace('/', '')[:-1]

                print(city)
                brief_info = re.findall(r'<li class="labels">(.*?)</li>',
                                        result[1], re.S | re.M)
                brief_info = ','.join(brief_info)
                #print(brief_info)

                job_descrip = re.compile(r'</?\w+[^>]*>').sub('', result[2])
                job_descrip = job_descrip.replace('&nbsp;', '')
                job_descrip = job_descrip.replace('&amp;', '')
                #print(job_descrip)
                return city, brief_info, job_descrip
            else:
                #print(content)
                print('Regex matched nothing (anti-scraping measures or some other issue)')
                write_file([str(i)], './bendi.txt')
                time.sleep(20)
                lagou = Lagou(self.__city, self.__query)
                lagou.read_db()

        except Exception as err:
            print(self.open_sub_url.__name__ + " " + str(err))
Example #4
    def configure(self, rec):
        """Write <rec['keys']> to my authorized_keys file."""
        logger.verbose('account: configuring %s'%self.name)
        new_keys = rec['keys']
        if new_keys != self.keys:
            # get the unix account info
            gid = grp.getgrnam("slices")[2]
            pw_info = pwd.getpwnam(self.name)
            uid = pw_info[2]
            pw_dir = pw_info[5]

            # write out authorized_keys file and conditionally create
            # the .ssh subdir if need be.
            dot_ssh = os.path.join(pw_dir,'.ssh')
            if not os.path.isdir(dot_ssh):
                if not os.path.isdir(pw_dir):
                    logger.verbose('account: WARNING: homedir %s does not exist for %s!'%(pw_dir,self.name))
                    os.mkdir(pw_dir)
                    os.chown(pw_dir, uid, gid)
                os.mkdir(dot_ssh)

            auth_keys = os.path.join(dot_ssh,'authorized_keys')
            tools.write_file(auth_keys, lambda f: f.write(new_keys))

            # set access permissions and ownership properly
            os.chmod(dot_ssh, 0o700)
            os.chown(dot_ssh, uid, gid)
            os.chmod(auth_keys, 0o600)
            os.chown(auth_keys, uid, gid)

            # set self.keys to new_keys only when all of the above ops succeed
            self.keys = new_keys

            logger.log('account: %s: installed ssh keys' % self.name)
Example #5
def run(bs, path, lr, ks, num_layer):
    fold = 1
    for X_train, Y_train, X_val, Y_val, val_cat in zip(training_data,
                                                       training_label,
                                                       validation_data,
                                                       validation_label,
                                                       validation_cate_label):
        print("Fold " + str(fold))
        model = tools.create_model(lr, bs, ks, num_layer)
        inner_path = path + "/fold_" + str(fold)
        if not os.path.exists(inner_path):
            os.makedirs(inner_path)

        early_stop = EarlyStopping(patience=20)
        history = model.fit(x=X_train,
                            y=Y_train,
                            epochs=80,
                            validation_data=(X_val, Y_val),
                            callbacks=[early_stop],
                            batch_size=bs,
                            verbose=0)
        evaluation = model.evaluate(x=X_val, y=Y_val)
        validation_prediction = model.predict_classes(X_val, batch_size=bs)
        score = f1_score(val_cat, validation_prediction, average=None)

        tools.show_plot(inner_path, history)
        tools.write_file(inner_path + "/readme.txt", evaluation, score, model)
        fold = fold + 1
        del model
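
One portability note: Sequential.predict_classes works on older Keras models but was deprecated and then removed (TensorFlow 2.6 dropped it), so on a current install the prediction line above needs the standard replacement:

import numpy as np

# predict_classes equivalent for a softmax output layer
validation_prediction = np.argmax(model.predict(X_val, batch_size=bs), axis=-1)

For a sigmoid binary output the equivalent is (model.predict(X_val) > 0.5).astype(int).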
Example #6
def zxcs_rar(ori_url):
    content = tools.get_html_content(ori_url, "body .wrap .content .downfile a")
    for item in content.items():
        url = item.attr.href
        text = item.text()
        print "zxcs_rar: ", url, text
        tools.write_file(url, "%s%s%s" % (OUTFILE, text, ".txt"))
Example #7
def zadzs_book_info(ori_url, sort, book_name):
    headers = tools.get_query_header(ori_url)
    link = headers["location"].replace("&amp;", "&")
    book_name = book_name + ".txt"  # book title
    file_name = link[link.rfind("/") + 1:]  # file name of the downloaded book

    tools.write_file("%s%s" % (ROOT_URL, link), "%s%s%s" % (OUTFILE, sort, ".txt"))
    tools.write_file(book_name + "\0" + file_name, "%s%s%s" % (OUTFILE, "flag", ".txt"))
Example #8
def write_logs(log, t=True, p=True):
    if t:
        if log[0] == '\n':
            log = '\n' + tools.get_current_time() + log[1:]
        else:
            log = tools.get_current_time() + log
    if p:
        try:
            print(log)
        except OSError:
            pass
    if config.save_log:
        tools.write_file(config.log_file, log + '\n', 'a')
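
A quick usage sketch of the flags above: t controls the timestamp prefix (a leading newline stays in front of it), p controls printing, and saving depends only on config.save_log.

write_logs('starting crawl')               # timestamped, printed, and saved
write_logs('\nnew section')                # newline kept ahead of the timestamp
write_logs('raw line', t=False, p=False)   # only appended to config.log_file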
Example #9
 def run():
     global dump_requested
     while True:
         db_lock.acquire()
         while not dump_requested:
             db_cond.wait()
         db_pickle = cPickle.dumps(db, cPickle.HIGHEST_PROTOCOL)
         dump_requested = False
         db_lock.release()
         try:
             tools.write_file(DB_FILE, lambda f: f.write(db_pickle))
             logger.log_database(db)
         except:
             logger.log_exc("database.start: failed to pickle/dump")
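
The loop pickles db while holding db_lock but performs the slow disk write only after releasing it, so writers are blocked just for the in-memory serialization. The producer side is not shown; assuming db_cond is a threading.Condition built on db_lock, a matching trigger (hypothetical name) would be:

def request_dump():
    # set the flag under the lock and wake the dump thread
    global dump_requested
    db_lock.acquire()
    dump_requested = True
    db_cond.notify()
    db_lock.release()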
Example #10
    def download_file_from_url(self, filename, url):
        """
        Download file 'filename' from the provided 'url'.
        """
        msg = "Downloading file '{f}' from '{u}'".format(f=filename,
                                                         u=url)
        printer.substep_info(msg)

        # .- open socket
        url_socket = urllib.request.urlopen(url)
        # .- read from socket
        tools.write_file(filename, url_socket.read(), mode="wb")
        # .- close socket
        url_socket.close()
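
url_socket.read() buffers the whole file in memory, and the socket is never closed if tools.write_file raises. A streamed variant (hypothetical name, standard library only) avoids both issues:

import shutil
import urllib.request

def download_file_from_url_streamed(filename, url, chunk_size=64 * 1024):
    # copy the response to disk in chunks; the context managers close
    # the socket and the file even when the write fails
    with urllib.request.urlopen(url) as response, open(filename, 'wb') as f:
        shutil.copyfileobj(response, f, chunk_size)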
Example #11
 def update_conf_file(self, cf_rec):
     if not cf_rec['enabled']:
         return
     dest = cf_rec['dest']
     err_cmd = cf_rec['error_cmd']
     mode = int(cf_rec['file_permissions'], base=8)
     try:
         uid = pwd.getpwnam(cf_rec['file_owner'])[2]
     except:
         logger.log('conf_files: cannot find user %s -- %s not updated'
                    %(cf_rec['file_owner'], dest))
         return
     try:
         gid = grp.getgrnam(cf_rec['file_group'])[2]
     except:
         logger.log('conf_files: cannot find group %s -- %s not updated'
                    %(cf_rec['file_group'], dest))
         return
     url = 'https://%s/%s' % (self.config.PLC_BOOT_HOST, cf_rec['source'])
     # set node_id at the end of the request - hacky
     if tools.node_id():
         if url.find('?') > 0:
             url += '&'
         else:
             url += '?'
         url += "node_id=%d"%tools.node_id()
     else:
         logger.log('conf_files: %s -- WARNING, cannot add node_id to request'
                    % dest)
     try:
         logger.verbose("conf_files: retrieving URL=%s"%url)
         contents = curlwrapper.retrieve(url, self.config.cacert)
     except xmlrpc.client.ProtocolError as e:
         logger.log('conf_files: failed to retrieve %s from %s, skipping' % (dest, url))
         return
     if not cf_rec['always_update'] and sha(contents).digest() == self.checksum(dest):
         return
     if self.system(cf_rec['preinstall_cmd']):
         self.system(err_cmd)
         if not cf_rec['ignore_cmd_errors']:
             return
     logger.log('conf_files: installing file %s from %s' % (dest, url))
     try:
         os.makedirs(os.path.dirname(dest))
     except OSError:
         pass
     tools.write_file(dest, lambda f: f.write(contents.decode()),
                      mode=mode, uidgid=(uid, gid))
     if self.system(cf_rec['postinstall_cmd']):
         self.system(err_cmd)
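
self.checksum(dest) is compared against sha(contents).digest() above but is not shown. Assuming sha is hashlib's sha1 (an assumption; the import is outside this snippet), a matching method sketch on the same class would be:

from hashlib import sha1 as sha  # assumption: matches the sha() used above

def checksum(self, path):
    # hash the currently installed file so unchanged content can be skipped
    try:
        with open(path, 'rb') as f:
            return sha(f.read()).digest()
    except IOError:
        return None  # a missing file never matches, forcing the install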
Example #13
    def test_restore_function(self):
        """.-
        check it is possible to restore files
        """

        tools.write_file(self.dummy_file_backup, self.text)

        if os.path.exists(self.dummy_file_backup) is False:
            raise Exception(self.no_file_backup_error_msg)

        # .- restore backup_file
        self.assertIsNone(tools.restore_backup_file(self.dummy_file))

        if os.path.exists(self.dummy_file_backup) is True:
            raise Exception(self.file_backup_error_msg)
Example #14
    def test_substitute_in_file(self):
        """
        .- check it is possible to substitute parameters files
        """
        text = "@@LFS_BASE_DIRECTORY@@"

        # .- write file
        tools.write_file(self.dummy_file, text)

        self.assertEqual(tools.read_file(self.dummy_file), text)

        # .- substitute
        tools.substitute_in_file(self.dummy_file, text, config.BASE_DIRECTORY)

        self.assertEqual(tools.read_file(self.dummy_file),
                         config.BASE_DIRECTORY)
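
substitute_in_file is exercised but not defined in this snippet. Given the read_file/write_file pair these tests already use, a minimal sketch consistent with the assertion (assuming it lives in the same tools module) is:

def substitute_in_file(filename, old, new):
    # rewrite the file with every occurrence of 'old' replaced by 'new'
    content = read_file(filename)
    write_file(filename, content.replace(old, new))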
Example #15
    def setUp(self):
        self.good_list = [
            "provider", "toolchain", "system", "configuration", "blfs"
        ]

        self.bad_list = [
            "provider", "toolchain", "configuration", "system", "provider",
            "blfs"
        ]

        self.dummy_file = "dummy.txt"
        self.dummy_file_backup = "dummy.txt.bck"
        self.text = "Hello"

        self.element_a = "system"
        self.element_b = "word"

        self.no_tag_command = "configure --prefix=/usr"
        self.tag_command = "<userinput>{}".format(self.no_tag_command)

        self.remap_command = "<userinput remap=\"configure\">{}".format(
            self.no_tag_command)

        self.disabled_command = "<userinput remap=\"lfsbuilder_disabled\">{}".format(
            self.no_tag_command)

        self.default_comment_symbol = "#"
        self.other_comment_symbol = "//"
        self.default_commented_string = "{cs} {c}".format(
            cs=self.default_comment_symbol, c=self.no_tag_command)

        self.other_commented_string = "{cs} {c}".format(
            cs=self.other_comment_symbol, c=self.no_tag_command)

        self.no_file_error_msg = "'{f}' file does not exist".format(
            f=self.dummy_file)
        self.file_error_msg = "'{f}' file should not exist".format(
            f=self.dummy_file)

        self.no_file_backup_error_msg = "'{f}' file does not exist".format(
            f=self.dummy_file_backup)
        self.file_backup_error_msg = "'{f}' file should not exist".format(
            f=self.dummy_file_backup)

        # Ensure 'dummy_file' exists in order to run tests
        tools.write_file(self.dummy_file, self.text)
Example #16
def remake_file(buoys=None, tables=None, remaketype='hdf', remakefrom='txt'):
    '''Remake file from another file if messed up.

    Overwrites existing remaketype files.

    buoys (list): buoys to remake
    tables (list): tables to remake (just for TABS buoys). If buoys is None,
     tables will be read in for each buoy to cover all options.
    remaketype (str), default 'hdf': which type of file to remake
    remakefrom (str), default 'txt': which type of existing file to use to
     remake other file from.
    Options for both are 'hdf' and 'txt'.
    '''

    if buoys is None:
        buoys = bys.index

    # loop through buoys
    for buoy in buoys:

        # pulls out the non-nan table values to loop over valid table names
        if len(buoy) == 1 and tables is None:
            tables = [
                bys.loc[buoy, table] for table in tablekeys
                if not pd.isnull(bys.loc[buoy, table])
            ]
        elif tables is None:
            tables = ['unused']

        for table in tables:  # loop through tables for each buoy
            if len(buoy) == 1:
                assert table is not None, 'need to input table when using TABS buoy'
                fname = path.join('..', 'daily',
                                  'tabs_' + buoy + '_' + table + '_all')
            else:
                fname = path.join('..', 'daily', buoy + '_all')

            # read from remakefrom file, write to remaketype file
            df = tools.read(fname, remakefrom)
            tools.write_file(df,
                             fname,
                             filetype=remaketype,
                             mode='w',
                             append=False)
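
A caveat in this remake_file (and in the variant in Example #19 below): tables is only recomputed while it is still None, so when called with tables=None for several buoys, every buoy after the first silently reuses the first buoy's table list. A minimal fix is to track whether the caller supplied it:

tables_given = tables is not None
for buoy in buoys:
    if not tables_given:
        # recompute the per-buoy default on every iteration
        if len(buoy) == 1:
            tables = [bys.loc[buoy, table] for table in tablekeys
                      if not pd.isnull(bys.loc[buoy, table])]
        else:
            tables = ['unused']
    # ... rest of the loop body unchanged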
Example #17
    def test_write_file_function_overwrite(self):
        """
        .- check if it is possible to overwrite files
        """

        # .- write file
        self.assertIsNone(tools.write_file(self.dummy_file, self.text))

        if os.path.exists(self.dummy_file) is False:
            raise Exception(self.no_file_error_msg)
Example #18
def extract_static_to_data():
    url = URL

    data = Spyder(url).open_url()
    # write index.html first
    with open(BASE_DIR + "/index.html", "w") as f:
        f.writelines(data)

    soup1 = BeautifulSoup(data, 'html.parser')
    csss = soup1.findAll("link")
    scripts = soup1.findAll("script")
    imgs = soup1.findAll("img")
   
    for x in scripts:
        if len(script_to_main(x)) < 1:
            # write the whole text into a randomly named js file
            write_txt_to_file(x, "js")
        else:
            # write a js file in binary form
            write_file(script_to_main(x)[0], "js")

    for x in imgs:
        write_file(img_to_main(x)[0], "img")
    
    for x in csss:
        write_file(link_to_main(x)[0], "css")
    return 
Example #19
def remake_file(buoys=None, tables=None, remaketype='hdf', remakefrom='txt'):
    '''Remake file from another file if messed up.

    Overwrites existing remaketype files.

    buoys (list): buoys to remake
    tables (list): tables to remake (just for TABS buoys). If buoys is None,
     tables will be read in for each buoy to cover all options.
    remaketype (str), default 'hdf': which type of file to remake
    remakefrom (str), default 'txt': which type of existing file to use to
     remake other file from.
    Options for both are 'hdf' and 'txt'.
    '''

    if buoys is None:
        buoys = bys.keys()

    # loop through buoys
    for buoy in buoys:

        # pulls out the non-nan table values to loop over valid table names
        if len(buoy) == 1 and tables is None:
            tables = [bys[buoy][table] for table in tablekeys if not pd.isnull(bys[buoy][table])]
        elif tables is None:
            tables = ['unused']

        for table in tables:  # loop through tables for each buoy
            if len(buoy) == 1:
                assert table is not None, 'need to input table when using TABS buoy'
                fname = path.join('..', 'daily', 'tabs_' + buoy + '_' + table + '_all')
            else:
                fname = path.join('..', 'daily', buoy + '_all')

            # read from remakefrom file, write to remaketype file
            df = tools.read(fname, remakefrom)
            tools.write_file(df, fname, filetype=remaketype, mode='w', append=False)
Example #21
def get_pagecon(ori_param_json):
    print ori_param_json["url"] + "============================"

    flag = ori_param_json["file"]
    torrent_data = ""
    img_data = "\n" + ori_param_json["src"]
    flag_data = "\n%s%s%s" % (flag, FLAG, ori_param_json["src"])
    data_filepath = "%s%s%s" % (OUT_DIR, ori_param_json["dir"].decode(
        'utf8').encode('gb2312'), PV190_FILE)
    torrent_filepath = "%s%s%s" % (OUT_DIR, ori_param_json["dir"].decode(
        'utf8').encode('gb2312'), PV190_TORRENT)
    flag_filepath = "%s%s%s" % (OUT_DIR, ori_param_json["dir"].decode(
        'utf8').encode('gb2312'), FLAG_FILE)

    pagecon = tools.get_html_content(ori_param_json["url"], "div", "class",
                                     "pagecon")
    if len(pagecon) <= 0:
        return
    list_ul = pagecon[0].find_all("img")
    for item in list_ul:
        img = item.get("src")
        img_url = "%s%s" % (PV190_ROOTURL, img) if (
            img.startswith("/")) else img
        print img_url
        img_data = "%s\n%s" % (img_data, img_url)
        flag_data = "%s\n%s%s%s" % (flag_data, flag, FLAG, img_url)

    torrent = pagecon[0].get_text()
    torrent_url_list = torrent.split(HTTP_SPLIT)
    if len(torrent_url_list) <= 1:
        return
    for item in torrent_url_list:
        if item.find(".torrent") >= 0:
            torrent_url = HTTP_SPLIT + item[:(item.find(".torrent") + 8)]
            print torrent_url
            torrent_data = "%s\n%s" % (torrent_data, torrent_url)
            flag_data = "%s\n%s%s%s" % (flag_data, flag, FLAG, torrent_url)

    tools.write_file(torrent_data, torrent_filepath)
    tools.write_file(img_data, data_filepath)
    tools.write_file(flag_data, flag_filepath)
Example #22
    def export_playlist_to_m3u(self):
        """ Save the current tracklist to a playlist """
        outfile = fileChooser.save(self.window, _('Export playlist to file'), 'playlist.m3u')

        if outfile is not None:
            tools.write_file(outfile, self.get_m3u_text())
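
get_m3u_text is not part of this snippet; a hypothetical sketch of what it presumably returns (the track attribute names are assumptions):

def get_m3u_text(self):
    # render the current tracklist in Extended M3U format
    lines = ['#EXTM3U']
    for track in self.tracklist:
        lines.append('#EXTINF:%d,%s - %s' % (track.length, track.artist, track.title))
        lines.append(track.path)
    return '\n'.join(lines) + '\n'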
Example #23
                               dend,
                               table=table,
                               usemodel=False,
                               userecent=True,
                               tz=tz)

                # after reading in this data for "recent" plots, save it
                if len(buoy) == 1:
                    fname = path.join('..', 'daily',
                                      'tabs_' + buoy + '_' + table)
                else:
                    fname = path.join('..', 'daily', buoy)
                # write daily data file, for whatever most recent time period
                # data was available
                if df is not None:
                    tools.write_file(df, fname)
                # if there are too few rows to plot, set as None
                if df is not None and len(df) < 2:
                    df = None
                # no model output for stations in bays or outside domain
                now = pd.Timestamp('now', tz='utc').normalize()
                past = now - pd.Timedelta('5 days')
                future = now + pd.Timedelta('4 days')
                # look for model output when buoy is in model domain
                if ~np.isnan(bys.loc[buoy,
                                     'station_number']) and table != 'eng':
                    # read in recent model output, not tied to when data output was found
                    dfmodelrecent = read.read(buoy,
                                              past,
                                              now,
                                              table=table,
Example #24
status += ["Location: %s\nOcupation: %s\n" % (sit.name, sit.ocu)]

status += [
    "    Host : %s\n      IP : %s\nIP local : %s\n    Pais : %s\n(Kernel version : %s (%s bits))\n"
    % (system.Host, system.IPp, system.IPl, system.pais, system.kernel, system.bits)
]


status += ["Dictionaries and lists: %s, %s\n" % (dicts[0].name, lists[0].name)]

status += ["Cams: %s\n" % (cams)]
status += ["External USB devices: %s\n" % (usbs)]
status += ["N of processors: %s\n" % (nproc)]

if "-d" in options:
    print "<============================ JARVIS: Log ============================>"
    for mnsg in status:
        print mnsg
        t.write_file("status.info", mnsg)
    if len(WARNINGS) > 0:
        print "\nLista the warnings:"
        for warn in WARNINGS:
            print "", warn
            t.write_file("WARNING.log", warn)
    print "<=====================================================================>"
else:
    for mnsg in status:
        t.write_file("status.info", mnsg)
    for warn in WARNINGS:
        t.write_file("WARNING.log", warn)
Example #25
def save_cmd(data):
    if tools.write_file(config.cmd_file, data):
        write_logs('Error while writing the command to file!', t=False)
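
save_cmd treats a truthy return from tools.write_file as failure, and Example #8 passes 'a' as a third positional mode argument. A sketch matching both call sites (the error-value convention is inferred from the code, not documented):

def write_file(filename, data, mode='w'):
    # return None on success, a truthy error object on failure
    try:
        with open(filename, mode, encoding='utf-8') as f:
            f.write(data)
    except OSError as err:
        return err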
Example #26
     dend = pd.Timestamp('now', tz='utc')
 # start 5 days earlier from 00:00 on day of last data, and account for time zones
 # so that enough data is read in for time zone conversion
 tzoffset = (dend.tz_localize(None) - dend.tz_convert(tz).tz_localize(None)).seconds/3600.
 dstart = (dend - pd.Timedelta('5 days')).normalize() + pd.Timedelta(str(tzoffset) + ' hours')
 dend += pd.Timedelta(str(tzoffset) + ' hours')
 df = read.read(buoy, dstart, dend, table=table, usemodel=False,
                userecent=True, tz=tz)
 if len(buoy) == 1:
     fname = path.join('..', 'daily', 'tabs_' + buoy + '_' + table)
 else:
     fname = path.join('..', 'daily', buoy)
 # write daily data file, for whatever most recent time period
 # data was available
 if df is not None:
     tools.write_file(df, fname)
 # if there are too few rows to plot, set as None
 if df is not None and len(df) < 2:
     df = None
 # no model output for stations in bays or outside domain
 now = pd.Timestamp('now', tz='utc').normalize()
 past = now - pd.Timedelta('5 days')
 future = now + pd.Timedelta('4 days')
 if bp.model(buoy, 'rho') and table != 'eng':
     # read in recent model output, not tied to when data output was found
     dfmodelrecent = read.read(buoy, past, now, table=table,
                                     usemodel='recent', tz=tz)
     # read in forecast model output, not tied to when data output was found
     # subtract a day so that models overlap for sure
     dfmodelforecast = read.read(buoy, now - pd.Timedelta('1 day'),
                                 future, table=table,
Example #27
--with-http_ssl_module \
--with-http_gzip_static_module \
--with-pcre=%s \
--with-openssl=%s \
--with-zlib=%s
""" % (nginx_path,nginx_path,base['user_name'],base['user_name'],pcre_path,openssl_path,zlib_path)

# configure
tools.pak_configure(base['soft_name'],tools.filter(nginx_package),base['soft_path'],base['tar_path'],options)

# change the Makefile to adjust the init prefix
makefile_fix = os.path.join(base['tar_path'], nginx_package, 'objs')
makefile = tools.read_file(makefile_fix + '/Makefile')
result_1 = re.sub(r'\./configure', './configure --prefix=/opt/lnmp/app/init', makefile)
result = re.sub(r'\./configure --prefix=/opt/lnmp/app/init --disable-shared', './configure --prefix=/opt/lnmp/app/init', result_1)
tools.write_file(makefile_fix + '/Makefile', result)

# make
tools.pak_make(tools.filter(nginx_package),base['tar_path'])

# config
for dir in ['sites-enabled','sites-available']:
	tools.make_dir(dir,os.path.join(nginx_path,'conf'))
tools.make_dir('bin',nginx_path)


for file in ['nginx.conf','nginx.sh','herouser_virtualhost']:
	temp_content = tools.read_file(cwd + '/' + file)
	if 'conf' in file:
		temp_file = os.path.join(nginx_path, 'conf') + '/' + file
	elif 'sh' in file:
Example #29
# config mysql
# directory create init.d
tools.make_dir('init.d',os.path.join(mysql_path,'etc'))

# mysql_install_db
installdb_path = os.path.join(base['tar_base'],tools.filter(mysql_package),'scripts')
installdb_file = installdb_path + '/mysql_install_db'
installdb_options = """
--user=%s \
--defaults-file=%s \
--datadir=%s \
--basedir=%s
""" % (user_name,os.path.join(mysql_path,'etc') + '/my.cnf', os.path.join(mysql_path,'data'),mysql_path)
os.chmod(installdb_file, stat.S_IEXEC)
os.system('%s %s' % (installdb_file, installdb_options))

# mysql startup scripts and conf
for file in ['my.cnf','mysql.server']:
	temp_content = tools.read_file(cwd + '/' + file)
	if 'cnf' in file:
		temp_file = os.path.join(mysql_path, 'etc') + '/' + file
	elif 'server' in file:
		temp_file = os.path.join(mysql_path, 'etc','init.d') + '/' + file
	tools.write_file(temp_file, temp_content)
	os.chmod(temp_file,stat.S_IRWXU + stat.S_IRWXG)


# config
os.system('chown -R %s.%s %s' % (base['user_name'], base['user_name'], mysql_path))
os.system('chmod -R 755 %s' % mysql_path)
Example #30
def readwrite(buoy, table=None, dstart=pd.Timestamp('1980-1-1', tz='utc')):
    '''Creates or updates buoy data files.

    Reads through yesterday so that when appended to everything is consistent.
    This will take a long time to run if none of the files exist.
    Note that dstart is ignored if buoy data file already exists.
    '''

    # bring data in file up through yesterday. This way files are
    # consistent regardless of what time of day script is run.
    dend = pd.Timestamp('now', tz='UTC').normalize()
    # file write flag
    mode = 'w'
    append = False  # for hdf file

    if len(buoy) == 1:
        assert table is not None, 'need to input table when using TABS buoy'
        fname = path.join('..', 'daily', 'tabs_' + buoy + '_' + table + '_all')
    else:
        fname = path.join('..', 'daily', buoy + '_all')

    # if buoy is inactive and its "all" file exists, don't read
    if buoy in bys.index and not bys.loc[buoy,
                                         'active'] and path.exists(fname):
        return

    # two types of files
    Types = ['txt', 'hdf']

    # if any of the files exist, then we want to make sure they are consistent
    if np.asarray([path.exists(fname + '.' + Type) for Type in Types]).any():
        lastrows = []
        for Type in Types:
            # get last row in file
            try:
                lastrows.append(tools.read(fname, Type, lastlineonly=True))
            # if can't get last row, remake file
            except:
                logging.warning(
                    'Could not access existing file %s of type %s. Will remake.'
                    % (fname, Type))
                # try other type of files to remake this file if needed
                othertype = [temp for temp in Types if temp != Type]
                try:
                    remake_file(buoys=[buoy],
                                tables=[table],
                                remaketype=Type,
                                remakefrom=othertype[0])
                    logging.warning(
                        'Remade file of type %s from type %s for buoy %s' %
                        (Type, othertype[0], buoy))
                except:
                    logging.warning('Could not remake file for buoy %s' %
                                    (buoy))
                # now the file should exist, so can read in lastrow
                lastrows.append(tools.read(fname, Type, lastlineonly=True))

        # if last rows are not the same, remake shorter file
        if not lastrows[0] == lastrows[1]:
            lastrow = lastrows[0]
            lastrow2 = lastrows[1]
            Type = Types[0]
            Type2 = Types[1]
            if lastrow < lastrow2:
                remake_file(buoys=[buoy], remaketype=Type, remakefrom=Type2)
                logging.warning(
                    'File type %s for buoy %s was short and remade with file type %s.'
                    % (Type, buoy, Type2))
            elif lastrow2 < lastrow:
                remake_file(buoys=[buoy], remaketype=Type2, remakefrom=Type)
                logging.warning(
                    'File type %s for buoy %s was short and remade with file type %s.'
                    % (Type2, buoy, Type))

    # now files should be consistent at this point if they already exist
    # if file already exists, overwrite dstart with day after day from last line of file
    if path.exists(fname + '.hdf'):
        dstart = tools.read(fname, Type, lastlineonly=True).normalize(
        ).tz_localize('UTC') + pd.Timedelta('1 days')
        mode = 'a'  # overwrite write mode
        append = True  # overwrite append mode for hdf
    df = read.read(buoy,
                   dstart,
                   dend,
                   table=table,
                   units='M',
                   tz='UTC',
                   usemodel=False,
                   userecent=False)

    # can't append to file with empty dataframe
    if df is not None and not (mode == 'a' and df.empty):
        for Type in Types:
            try:
                tools.write_file(df,
                                 fname,
                                 filetype=Type,
                                 mode=mode,
                                 append=append)
            except:
                logging.warning(
                    'Could not write to file %s of type %s. Will remake.' %
                    (fname, Type))
                # try both other types of files to remake this file if needed
                othertype = [temp for temp in Types if temp != Type]
                try:
                    remake_file(buoys=[buoy],
                                tables=[table],
                                remaketype=Type,
                                remakefrom=othertype[0])
                    logging.warning(
                        'Remade file of type %s from type %s for buoy %s' %
                        (Type, othertype[0], buoy))
                except:
                    logging.warning('Could not remake file for buoy %s' %
                                    (buoy))
    else:
        logging.warning('No new data has been read in for buoy ' + buoy +
                        ' table ' + table)
Example #31
def readwrite(buoy, table=None, dstart=pd.Timestamp('1980-1-1', tz='utc')):
    '''Creates or updates buoy data files.

    Reads through yesterday so that when appended to everything is consistent.
    This will take a long time to run if none of the files exist.
    Note that dstart is ignored if buoy data file already exists.
    '''

    # bring data in file up through yesterday. This way files are
    # consistent regardless of what time of day script is run.
    dend = pd.Timestamp('now', tz='UTC').normalize()
    # file write flag
    mode = 'w'
    append = False  # for hdf file

    if len(buoy) == 1:
        assert table is not None, 'need to input table when using TABS buoy'
        fname = path.join('..', 'daily', 'tabs_' + buoy + '_' + table + '_all')
    else:
        fname = path.join('..', 'daily', buoy + '_all')

    # if buoy is inactive and its "all" file exists, don't read
    if buoy in bys.keys() and not bys[buoy]['active'] and path.exists(fname):
        return

    # two types of files
    Types = ['txt', 'hdf']

    # if any of the files exist, then we want to make sure they are consistent
    if np.asarray([path.exists(fname + '.' + Type) for Type in Types]).any():
        lastrows = []
        for Type in Types:
            # get last row in file
            try:
                lastrows.append(tools.read(fname, Type, lastlineonly=True))
            # if can't get last row, remake file
            except:
                logging.warning('Could not access existing file %s of type %s. Will remake.' % (fname, Type))
                # try other type of files to remake this file if needed
                othertype = [temp for temp in Types if temp != Type]
                try:
                    remake_file(buoys=[buoy], tables=[table], remaketype=Type, remakefrom=othertype[0])
                    logging.warning('Remade file of type %s from type %s for buoy %s' % (Type, othertype[0], buoy))
                except:
                    logging.warning('Could not remake file for buoy %s' % (buoy))
                # now the file should exist, so can read in lastrow
                lastrows.append(tools.read(fname, Type, lastlineonly=True))


        # if last rows are not the same, remake shorter file
        if not lastrows[0] == lastrows[1]:
            lastrow = lastrows[0]; lastrow2 = lastrows[1]
            Type = Types[0]; Type2 = Types[1]
            if lastrow < lastrow2:
                remake_file(buoys=[buoy], remaketype=Type, remakefrom=Type2)
                logging.warning('File type %s for buoy %s was short and remade with file type %s.' % (Type, buoy, Type2))
            elif lastrow2 < lastrow:
                remake_file(buoys=[buoy], remaketype=Type2, remakefrom=Type)
                logging.warning('File type %s for buoy %s was short and remade with file type %s.' % (Type2, buoy, Type))

    # now files should be consistent at this point if they already exist
    # if file already exists, overwrite dstart with day after day from last line of file
    if path.exists(fname + '.hdf'):
        dstart = tools.read(fname, Type, lastlineonly=True).normalize().tz_localize('UTC') + pd.Timedelta('1 days')
        mode = 'a'  # overwrite write mode
        append = True  # overwrite append mode for hdf
    df = read.read(buoy, dstart, dend, table=table, units='M', tz='UTC',
                   usemodel=False, userecent=False)

    # can't append to file with empty dataframe
    if df is not None and not (mode == 'a' and df.empty):
        for Type in Types:
            try:
                tools.write_file(df, fname, filetype=Type, mode=mode, append=append)
            except:
                logging.warning('Could not write to file %s of type %s. Will remake.' % (fname, Type))
                # try both other types of files to remake this file if needed
                othertype = [temp for temp in Types if temp != Type]
                try:
                    remake_file(buoys=[buoy], tables=[table], remaketype=Type, remakefrom=othertype[0])
                    logging.warning('Remade file of type %s from type %s for buoy %s' % (Type, othertype[0], buoy))
                except:
                    logging.warning('Could not remake file for buoy %s' % (buoy))
    else:
        logging.warning('No new data has been read in for buoy ' + buoy + ' table ' + table)