Example #1
def update_queues_conf_file(filename, queues_info):
    # read specific slurm.conf
    if not os.path.isfile(filename):
        logger.error("%s is not exists", filename)
        return -1

    partitions_conf = get_partition_conf_from_queues(queues_info)
    logger.info("partition conf will be %s", partitions_conf)
    partition_begin = False
    buf = StringIO.StringIO()
    with open(filename, 'r') as f:
        for line in f:
            if line.startswith("PartitionName=controller ") and \
                    partition_begin is False:
                partition_begin = True
                buf.write(line)
            elif partition_begin:
                if not line.startswith("PartitionName="):
                    partition_begin = False
                    # put Slurm Partition conf here
                    buf.write(partitions_conf)
                    buf.write("\n")
                else:
                    # skip current Partition conf
                    logger.info("current partition conf %s is skipped", line)
            else:
                buf.write(line)

    content = buf.getvalue()
    buf.close()

    write_file(filename, content)
    return 0
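All of these examples delegate the actual disk write to a project-local write_file helper rather than calling open() inline. A minimal sketch of the two-argument variant used here, assuming it simply replaces the file's contents (the signature is inferred from the call sites, not taken from any one project):

def write_file(filename, content):
    # Replace the contents of `filename` with `content` (sketch).
    with open(filename, 'w') as f:
        f.write(content)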
Example #2
    def get_run_activitys(self):
        '''
        Get the list of running Activities.
        :return: count, list of activities
        '''
        startnumber = None
        endnumber = None
        actlen = None
        actlist = []

        try:
            with open(self.monkeylog) as f_w:
                result = f_w.read()
                if re.findall("Total activities", result) and re.findall("How many Events Dropped", result):
                    with open(self.monkeylog) as f_w:
                        for number, line in enumerate(f_w.readlines()):
                            if re.findall("Total activities",line):
                                actlen = line.split("Total activities")[1].replace("\n","").strip()
                                startnumber = number + 1
                            elif re.findall("How many Events Dropped",line):
                                endnumber = number
                        act_result = linecache.getlines(self.monkeylog)[startnumber:endnumber]
                        for act in act_result:
                            actlist.append(str(act).split("-")[1].replace("\n","").strip())
                else:
                    logger.info("{}文件中未查询到activity列表".format(self.monkeylog))
                common.write_file(run_activity_path, actlist)
        except Exception as e:
            logger.error('Exception while getting activity list: {}'.format(e))
        finally:
            return actlen, actlist
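The parser above keys off two marker lines in the monkey log. A reconstructed fragment of the input it expects (package names are made up):

Total activities 3
1 - com.example.app/.MainActivity
2 - com.example.app/.SettingsActivity
3 - com.example.app/.AboutActivity
How many Events Dropped: 0

actlen captures the count after "Total activities"; each line between the two markers is split on "-" and the right-hand side is appended to actlist.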
Example #3
def channel_playlist():
    channels, _code = _fetch_channels()
    if not channels:
        return _code, -1, -1
    channels_sorted = sorted(channels.values(), key=lambda _channel: _channel.weight)
    if cfg.channel_group == 1:
        group = c.default_group_name
    else:
        group = cfg.channel_group_name
    if cfg.my_script == 1:
        streamer = c.pipe + os.path.join(cfg.playlist_path, cfg.my_script_name)
    else:
        streamer = c.pipe + os.path.join(cfg.playlist_path, cfg.playlist_streamer)
    playlist_src = '#EXTM3U\n'
    playlist_dst = '#EXTM3U\n'
    _num = 0
    _err = 0
    for channel in channels_sorted:
        try:
            _log("Adding: " + c.to_string(channel.name))
            playlist_src += '#EXTINF:-1, %s\n%s\n' % (c.to_string(channel.name), c.to_string(channel.url()))
            playlist_dst += c.build_channel_lines(channel, cfg.channel_logo, _logo_path_file(channel.name), streamer, group, cfg.playlist_type, cfg.channel_epg_name, cfg.channel_epg_id, cfg.channel_group)
            _num += 1
        except ChannelIsNotBroadcastingError:
            _err += 1
            _log("... Not broadcasting. Skipped.")
        except AuthenticationError:
            return c.authent_error, 0, 0
        except TooManyDevicesError:
            return c.toomany_error, 0, 0
    c.write_file(playlist_src, os.path.join(cfg.playlist_path, cfg.playlist_src), _log)
    c.write_file(playlist_dst, os.path.join(cfg.playlist_path, cfg.playlist_dst), _log)
    return 'OK', _num, _err
Example #4
def collect_html(args):
    """
    Collect html data locally, so you don't have to
    keep hitting the url.

    :param args: args for collect html
    :type args: Namespace
    """
    url_list = args.url_list
    output_dir = args.output_dir

    print(url_list)

    # do some checks
    try: 
        assert os.path.exists(url_list), 'url_list must exist'
        assert os.path.exists(output_dir), 'output_dir must exist'
    except AssertionError as err: 
        logger.error('Failed check: {}'.format(err)) 
        return 

    urls = common.read_file(url_list)
    
    for url in urls: 
        logger.debug(url) 

        html = spy_tools.collect_html(url)
        out = url.split('/')
        output = os.path.join(output_dir, out[-1] + '.html')
        common.write_file(html, output)
Example #5
def main():
    #argparser = argparse.ArgumentParser(description="Compares two csv files and produces a result file.")
    #argparser.add_argument("files", metavar="F", type=open, nargs="+", help="files to process")
    #args = argparser.parse_args()
    #print(args)

    print(sys.argv)

    if len(sys.argv) < 5:
        print("Usage: main.py <first_file> <second_file> <output_file> <second_file_format> [-e]")
        print("       Use a -e parameter to print errors during parsing.")
        sys.exit(1)

    second_file_parser = get_parser(sys.argv[4])

    print("The '{0}' parser has been selected for the second file by format '{1}'".
          format(second_file_parser.type, sys.argv[4]))

    print_errors = len(sys.argv) > 5 and sys.argv[5] == '-e'

    first_list = common.parse_file(sys.argv[1], rigla.Parser(), print_errors)
    second_list = common.parse_file(sys.argv[2], second_file_parser, print_errors)

    list_of_compared_results = comparer.Comparer(first_list, second_list).compare()

    common.write_file(sys.argv[3], list_of_compared_results.not_in_first_list,
                      list_of_compared_results.not_in_second_list, list_of_compared_results.in_both_lists)
Example #6
  def create_env_script(self):
    common.logit("\nCreating VO frontend env script.")
    data = """#!/bin/bash
source %(condor_location)s/condor.sh
""" % { "condor_location" : self.condor_location(),}
    common.write_file("w",0644,self.env_script(),data)
    common.logit("VO frontend env script created: %s" % self.env_script() )
Example #7
def get_ca_cert(fb, gid):

    # Make a Facebook query call to fetch data
    results = fb.fql.query('SELECT data FROM SecureGridNet.ipopdata \
              WHERE _id IN (SELECT obj_id FROM SecureGridNet.certificate \
              WHERE gid = ' + gid + ')')

    # Get the last result; there should be only one
    data = ''
    for result in results:
        data = result['data']

    # Return if certificate is not found
    if(data == ''):
        return -1

    # Certificate is base64 encoded, so it needs to be decoded
    cert = common.fb_decode(data)

    # Save certificate to floppy image
    common.write_file('/mnt/fd/cacert.pem', cert)

    # Set certificate directory
    rdir = "/etc/racoon/certs"

    # Prepare ca key for racoon
    os.system("cp -f /mnt/fd/cacert.pem " + rdir + "/.")
    os.system("ln -sf " + rdir + "/cacert.pem " + rdir + "/`openssl x509 -noout \
           -hash -in " + rdir + "/cacert.pem`.0")

    return 0
Example #8
  def __create_condor_config__(self):
    """ This first updates the primary condor_config with either:
          a. the gwms condor_config file if a tarball install
          b. the config.d directory containing the gwms config files
        Then it creates the individual condor config files.
    """
    #if len(self.colocated_services) > 0:
    #  return  # we've already updated this
    common.logit("... updating: %s" % self.condor_config())
    common.logit("    to point to GWMS config files directory")
    cfg_data = """
########################################################
# Using local configuration file directory below
########################################################
LOCAL_CONFIG_FILE = 
LOCAL_CONFIG_DIR  = %s
""" % (self.local_config_dir())
    common.write_file("a",0644,self.condor_config(),cfg_data,SILENT=False)
    stdout = glideinwms.lib.subprocessSupport.iexe_cmd("tail -5 %s" % self.condor_config())
    common.logit(stdout)

    common.logit("\nCreating GWMS condor_config files in:")
    common.logit("%s" % self.local_config_dir())
    common.make_directory(self.local_config_dir(),self.username(),0755)
    types =  self.condor_config_data.keys()
    types.sort()
    for type in types:
      filename = "%s/%s.config" % (self.local_config_dir(),type)
      common.logit("    %s" % os.path.basename(filename))
      common.write_file("w",0644,filename,self.condor_config_data[type],SILENT=True)
    self.__create_secondary_schedd_dirs__()
Example #9
def check_cert(fb):

    # Get object id from file os.system
    obj_id = common.read_file('fb_req_obj_id.txt')

    # Check Facebook for certificate
    results = fb.fql.query('SELECT id, data FROM SecureGridNet.ipopdata \
              WHERE _id IN (SELECT obj_id FROM SecureGridNet.certificate \
              WHERE gid = ' + obj_id + ')')

    # Get result from Facebook database
    id = ''
    data = ''
    for result in results:
        id = result['id']
        data = result['data']

    # Return if certificate is not found
    if(data == ''):
        return -1

    # Check to see if the certificate is for the current user
    if(id != obj_id):
        return -1

    # Decode data to cert
    cert = common.fb_decode(data)

    # Write certificate to file
    common.write_file('/etc/racoon/certs/host-cert.pem', cert)

    return 0
Example #10
def plot_vehicle_status():
    fig, axs = plt.subplots(3, figsize=(8, 4))
    # fig.set_size_inches(18.5, 10.5)
    plt.subplots_adjust(hspace=0.5, top=0.95, bottom=0.05)
    ax = axs[1]
    ax2 = axs[0]
    # ax = fig.gca()
    # ax2 = ax.twiny()
    ax.set_yticks([])
    ax2.set_yticks([])
    ax.set_xlim([0, 140])
    ax2.set_xlim([0, 7500])
    ax.set_xlabel("Speed (KM/H)")
    ax2.set_xlabel("RPM")
    # plt.show()
    cur_spd_rect = ax.barh(0.05, 1, color = 'cyan', height = 0.25, edgecolor='none')
    # set_spd_rect = ax.barh(0.25, 1, color = 'b', height = 0.05, edgecolor='none')
    rpm_rect = ax2.barh(0.6, 1, color = 'tomato', height = 0.25, edgecolor='none')

    axamp = axs[2]
    samp = Slider(axamp, 'Set Spd', 0.0, 140.0, valinit=0)
    gear_label = plt.text(70, 3.3, "  " ,ha='center', va='center', size='x-large', weight='bold')
    while(1):
        set_spd = int(samp.val)
        write_file("set_speed.txt", set_spd)
        cur_spd_rect.patches[0].set_width(read_file('cur_speed.txt', float))
        rpm_rect.patches[0].set_width(read_file('rpm.txt', float))
        gear_label.set_text("Gear %d" % read_file('gear.txt', int))
        plt.pause(0.2)
Example #11
def channel_playlist():
    global _channel_group_, _channel_groupname_, _myscript_, _myscript_name_, _channel_logo_, \
        _playlist_type_, _channel_epgname_, _channel_epgid_, _ffmpeg_, _no_error_
    channels, _code = _fetch_channels()
    if not channels:
        return _code, -1, -1

    channels_sorted = sorted(channels.values(),
                             key=lambda _channel: _channel.weight)
    if _channel_group_ == 1:
        group = c.default_group_name
    else:
        group = _channel_groupname_

    if _myscript_ == 1:
        streamer = c.pipe + os.path.join(_playlist_path_, _myscript_name_)
    else:
        streamer = c.pipe + os.path.join(_playlist_path_, _playlist_streamer_)

    playlist_src = '#EXTM3U\n'
    playlist_dst = '#EXTM3U\n'
    _num = 0
    _err = 0
    for channel in channels_sorted:
        try:
            log_not("Adding: " + channel.name)
            playlist_src += '#EXTINF:-1, %s\n%s\n' % (c.to_string(
                channel.name), c.to_string(channel.url()))
            playlist_dst += c.build_channel_lines(
                channel, _channel_logo_, _logo_path_file(channel.name),
                streamer, group, _playlist_type_, _channel_epgname_,
                _channel_epgid_, _channel_group_)
            _num += 1
        except ChannelIsNotBroadcastingError:
            log_not("... Not broadcasting. Skipped.")
            _err += 1
        except AuthenticationError:
            return _authent_error_, 0, 0
        except TooManyDevicesError:
            return _toomany_error_, 0, 0
        except NoPlaylistUrlsError:
            log_not("... No playlist URL provided. Skipped.")
            _err += 1
    c.write_file(playlist_src,
                 xbmc.translatePath(os.path.join(_profile_, _playlist_src_)),
                 _log_dbg)
    c.write_file(playlist_dst,
                 xbmc.translatePath(os.path.join(_profile_, _playlist_dst_)),
                 _log_dbg)
    if _playlist_type_ == 3:
        c.write_streamer(
            xbmc.translatePath(os.path.join(_profile_, _playlist_streamer_)),
            xbmc.translatePath(os.path.join(_profile_, _playlist_src_)),
            _ffmpeg_, _log_dbg)
    set_setting('last_time', time.strftime('%Y-%m-%d %H:%M'))
    set_setting('last_downloaded', c.to_string(_num))
    set_setting('last_skipped', c.to_string(_err))
    return _no_error_, _num, _err
Example #12
 def get_new_config_entries(self):
   """This method is intended to retrieve new configuration file entry
      element after the initial installation is complete.  It will 
      create a file containing the selected entry points that can be
      merged into the existing Factory configuration file.
   """
   self.get_config_entries_data()
   filename = "%s/new_entries.%s" % (self.config_dir(),common.time_suffix())
   common.write_file("w",0644,filename,self.config_entries_data())
Example #13
def create_ipop_config(path, gid) :
    data = ''
    data += '<IpopConfig>'
    data += '<IpopNamespace>' + gid + '</IpopNamespace>'
    data += '<VirtualNetworkDevice>tapipop</VirtualNetworkDevice>'
    data += '<EnableMulticast>false</EnableMulticast>'
    data += '</IpopConfig>'
    common.write_file(path, data)
    return 0
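For comparison, the same document could be built with the standard library's ElementTree instead of string concatenation; a Python 3 sketch (tag names taken from the example above, not from the original project):

import xml.etree.ElementTree as ET

def build_ipop_config(gid):
    root = ET.Element('IpopConfig')
    ET.SubElement(root, 'IpopNamespace').text = gid
    ET.SubElement(root, 'VirtualNetworkDevice').text = 'tapipop'
    ET.SubElement(root, 'EnableMulticast').text = 'false'
    return ET.tostring(root, encoding='unicode')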
Example #14
  def create_env_script(self):
    common.logit("\nCreating VO frontend env script.")
    data = """#!/bin/bash
. %(condor_location)s/condor.sh
export PYTHONPATH=$PYTHONPATH:%(install_location)s/..
""" % { "condor_location" : self.condor_location(),
        "install_location" : self.glideinwms_location(),}
    common.write_file("w",0644,self.env_script(),data)
    common.logit("VO frontend env script created: %s" % self.env_script() )
Example #15
def main():
    config = sys.argv[1]
    data = read_files(config).split("\n")
    remove_list = list()
    for line in data:
        remove_list = check_hdfs_dirs(line)
    for item in remove_list:
        write_file(output_file, item)
        remove_hdfs_dirs(item)
Example #16
def create_cert():

    # Checks to see if path exists
    if(not os.path.exists('demoCA/cacert.pem')):
        os.system('/usr/lib/ssl/misc/CA.pl -newca')

        # Create blank signed request file
        common.write_file('signed_cert_ids.txt','')

    return 0
Example #17
 def applyChanges(self):
     """
     Redefined from *EditionWidget* class.
     """
     self.prev_state = _text2unicode(self.editor.toPlainText())
     try:
         write_file(self.file_name, self.prev_state)
     except IOError:
         message = translate("AsterStudy", "Cannot write file.")
         Q.QMessageBox.critical(self, "AsterStudy", message)
Example #18
 def modify_and_push(self):
     test_file = os.path.join("php","test.php")
     common.write_file(os.path.join(self.app_name, test_file), "<?php phpinfo(); ?>")
     cmd = "cd %s && git add %s && git commit -m 'test' -a && git push" % (self.app_name, test_file),
     (status, output) = common.cmd_get_status_output(cmd)
     if (status == 0):
         return 0
     else:
         self.error(output)
         return 1
Example #19
 def accept(self):
     """
     Redefined from *Dialog* class.
     """
     text = _text2unicode(self.editor.toPlainText())
     try:
         write_file(self.file_name, text)
     except IOError:
         message = translate("AsterStudy", "Cannot write file.")
         Q.QMessageBox.critical(self, "AsterStudy", message)
     super(TextFileDialog, self).accept()
Example #20
 def get_new_config_group(self):
   """This method is intended to create a new group element after the initial 
      installation is complete.  It will create a file containing the group
       and job selection/matching criteria.  This can then be manually merged
      into the existing frontend configuration file.
   """
   filename = "%(config_dir)s/%(group)s.%(time)s" % \
        { "config_dir" : self.config_dir(),
          "group"      : self.group_name(),
          "time"       : common.time_suffix(),}
   common.write_file("w",0644,filename,self.get_match_criteria())
Example #21
def download_file(reqs_ses, url, filepath):
    res = common.fetch(
        requests_session=reqs_ses,
        url=url,
        method='get',
        expect_status=200,
    )
    common.write_file(  # Store page to disk
        file_path=filepath, data=res.content)
    logging.debug('Saved {0} to {1}'.format(url, filepath))
    return
Example #22
def fb_store_req(fb, cert_req):

    # Prepare data for Facebook
    data = common.fb_encode(cert_req)

    # Put data on Facebook
    obj_id = common.fb_put_data(fb, fb.uid, data, 'request')

    # Write object_id to file
    common.write_file('fb_req_obj_id.txt', str(obj_id))

    return 0
Example #23
  def create_env_script(self):
    """This creates an "env" type script that must be used before starting the
       factory.
    """
    common.logit("Creating environment script...")
    data = """#!/bin/bash
export X509_CERT_DIR=%(x509_cert_dir)s
.  %(condor_location)s/condor.sh
""" % { "x509_cert_dir"   : self.wms.x509_cert_dir(), 
        "condor_location" : self.wms.condor_location(),}
    common.write_file("w",0644,self.env_script(),data)
    common.logit("%s\n" % data)
Example #24
 def __create_initd_script__(self):
   if self.client_only_install == True:
     common.logit("... client only install. No startup initd script required.")
     return
   if self.install_type() == "rpm":
     common.logit("... This is an 'rpm' install. An initd script already exists.")
     return
   common.logit("")
   common.logit("Creating startup /etc/init.d script")
   common.logit("   %s" % self.initd_script())
   data = self.__initd_script__()
   common.write_file("w",0755,self.initd_script(),data,SILENT=True)
Example #25
def train_data(args):
    """
    Train the model. 

    :param args: args for train_data
    :type args: Namespace
    """
    # pull args out
    training_dir = args.training_dir
    train_file = args.train_file
    eval_file = args.eval_file
    epochs = args.epochs
    continue_training = args.keep_training
    checkpoint_dir = args.checkpoint_dir
    #truncate = args.truncate
    vocab = args.vocab

    # do some checks
    try:
        if continue_training:
            assert checkpoint_dir is not None, 'to continue training you must give checkpoint_dir'
            assert os.path.exists(checkpoint_dir), 'checkpoint_dir must exist'
        assert epochs > 0, 'epochs must be positive'
        assert os.path.exists(train_file), "train_file must exist"
        assert os.path.exists(training_dir), "training_dir must exist"
        assert os.path.exists(eval_file), "eval_file must exist"
        assert os.path.exists(vocab), "vocab file must exist"
        #assert truncate > 0, 'truncate must be positive'
    except AssertionError as err:
        logger.error("Failed check: {}".format(err))
        return

    # get train and eval data
    train_data, train_labels = process_data.process_data(train_file, vocab)
    test_data, test_labels = process_data.process_data(eval_file, vocab)

    # make the checkpoint directory
    if not continue_training and checkpoint_dir is None:
        checkpoint_dir = common.grab_next_session(training_dir)

    # train the model
    history, model_summary = rnn.train_and_validate(train_data, train_labels,
                                                    test_data, test_labels,
                                                    epochs, checkpoint_dir,
                                                    continue_training)

    # plot the data
    if not continue_training:
        common.write_file(model_summary, checkpoint_dir + "/MODEL_SUMMARY")
        common.plot_graphs_val(history, 'categorical_accuracy', checkpoint_dir)
        common.plot_graphs_val(history, 'loss', checkpoint_dir)
Example #26
  def create_env_script(self):
    """This creates an "env" type script that must be used before starting the
       factory.
    """
    common.logit("Creating environment script...")
    data = """#!/bin/bash
export X509_CERT_DIR=%(x509_cert_dir)s
source %(condor_location)s/condor.sh
""" % { "x509_cert_dir"   : self.wms.x509_cert_dir(), 
        "condor_location" : self.wms.condor_location(),}
    if self.use_vofrontend_proxy() == "n":
      data += "export X509_USER_PROXY=%s" % self.x509_proxy()
    common.write_file("w",0644,self.env_script(),data)
    common.logit("%s\n" % data)
Example #27
def create_word2vec_model():
    mecab = MeCab.Tagger('-Owakati')
    nouns = []
    articles = {}

    # text from NHK NEWS WEB
    urls = [
        'http://www3.nhk.or.jp/rss/news/cat' + str(i) + '.xml'
        for i in range(8)
    ]
    nhk = ''
    for url in urls:
        html = urllib.request.urlopen(url)
        soup = BeautifulSoup(html, 'html.parser')

        items = soup.find_all('item')
        for i in items:
            item_soup = BeautifulSoup(str(i), 'html.parser')
            text = remove_tag(item_soup.title, 'title') + ' ' + remove_tag(
                item_soup.description, 'description')
            nhk += text
            articles[remove_tag(item_soup.title, 'title')] = {
                'url': remove_tag(item_soup.link, 'link'),
                'nouns': list(set(extract_nouns(text)))
            }

    nhk_nouns = extract_nouns(nhk)
    write_json_file(list(set(nhk_nouns)), 'nhk-nouns')
    write_json_file(articles, 'nhk-articles')
    nouns.extend(nhk_nouns)

    # text from twitter user time-line
    tweets = api.user_timeline(count=200)
    twitter = ''
    for tweet in tweets:
        tweet = process_tweet(tweet)
        if tweet:
            twitter += tweet + '\n'
    twitter_nouns = extract_nouns(twitter)
    nouns.extend(twitter_nouns)
    write_json_file(list(set(twitter_nouns)), 'twitter-nouns')

    write_json_file(list(set(nouns)), 'nouns')
    write_file(mecab.parse(nhk + twitter), 'wakati.txt')

    data = word2vec.Text8Corpus('wakati.txt')
    model = word2vec.Word2Vec(data, size=200, min_count=0)

    return model
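word2vec here is gensim's gensim.models.word2vec module; the size= keyword places this on the pre-4.0 gensim API (newer releases renamed it vector_size). Once trained, the model can be queried for neighbors, e.g. (the query token is illustrative only, since the vocabulary depends on the scraped text):

model = create_word2vec_model()
print(model.wv.most_similar(u'ニュース', topn=5))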
Example #28
def parse_formula_sheet(sh, output_file):

    all_fun = begin
    for row in range(2, sh.nrows):
        skill_id_str = get_str_from_sheet(sh, row, 0)
        skill_name_str = get_str_from_sheet(sh, row, 1)
        skill_formula_str = get_str_from_sheet(sh, row, 2)
        fun_str = parse_function(skill_formula_str, u"技能名:" + skill_name_str,
                                 u"ser_%s" % skill_id_str, dicVar,
                                 srcSkillFormla)
        all_fun += fun_str
    #	src_file = path + "/" + skill_id_str + ".c"
    #	write_src(output_file, begin, end, fun_str, "utf-8")
    all_fun += end
    write_file(output_file, all_fun)
Example #29
def sign_cert_req(req):

    # Write req to file
    fname = 'tmp_cert_req.pem'
    fname_cert = 'newcert.pem'
    common.write_file(fname, req)

    # Sign the certificate
    os.system('openssl ca -batch -policy policy_anything -key secret \
            -in ' + fname + ' -out ' + fname_cert)

    # Read certificate
    signed_cert = common.read_file(fname_cert)

    return signed_cert
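The shell-interpolated os.system call above ignores the exit status and would break on paths containing spaces; a sketch of the same signing step using subprocess instead, reusing the example's common helpers and assuming the same openssl arguments:

import subprocess

def sign_cert_req_subprocess(req):
    fname, fname_cert = 'tmp_cert_req.pem', 'newcert.pem'
    common.write_file(fname, req)
    # Raises CalledProcessError if openssl exits non-zero
    subprocess.check_call(['openssl', 'ca', '-batch',
                           '-policy', 'policy_anything', '-key', 'secret',
                           '-in', fname, '-out', fname_cert])
    return common.read_file(fname_cert)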
Example #30
  def __create_condor_mapfile__(self,users):
    """ Creates the condor mapfile for GSI authentication"""
    if self.client_only_install == True:
      common.logit( "... No Condor mapfile file needed. Client only install")
      return
    mapfile_entries = self.__condor_mapfile_entries__(users)
    filename = self.condor_mapfile()
    common.logit("... creating Condor mapfile")
    common.logit("    %s" % filename)
    common.make_directory(os.path.dirname(filename),pwd.getpwuid(os.getuid())[0],0755)
    mapfile_entries += """GSI (.*) anonymous
FS (.*) \\1
""" 
    common.write_file("w",0644,filename,mapfile_entries,SILENT=True)
    common.logit("\nCondor mapfile entries:")
    common.logit("%s" % mapfile_entries)
Example #31
def save_followed_file(req_ses, followed_list_path):
    logging.info('Saving list of followed blogs')
    followed_list_path = os.path.join('dl', 'followed.opml')
    # Get followed list file
    # www.tumblr.com/following.opml
    logging.debug('Loading followed list')
    followed_res = common.fetch(
        requests_session=req_ses,
        url='https://www.tumblr.com/following.opml',
        method='get',
    )
    common.write_file(  # Save to file
        file_path=followed_list_path,
        data=followed_res.content)
    logging.info('Saved list of followed blogs')
    return
Example #32
def main():
    template_data = common.read_file(ALL_TESTS_TEMPLATE_FILE)
    template = Template(template_data)

    test_html_paths = []
    for file_name in common.get_files_with_suffix(
            common.GENERATED_TEST_BASE_PATH, '_test.html'):
        test_html_paths.append("  '%s/%s'," % (GRPC_WEB_BASE_DIR, file_name))
    # Example output paths:
    # 'packages/grpc-web/generated/test_htmls/javascript__net__grpc__web__grpcwebclientbase_test.html',
    # 'packages/grpc-web/generated/test_htmls/javascript__net__grpc__web__grpcwebstreamparser_test.html',
    test_html_paths_str = "\n".join(test_html_paths)

    # Writes the generated output to the all_tests.js file.
    common.write_file(GENERATED_ALL_TESTS_JS_PATH,
                      template.substitute(test_html_paths=test_html_paths_str))
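Template here is presumably the standard library's string.Template, so the all_tests template file would contain a $test_html_paths placeholder. A minimal illustration of the substitution (the template content is hypothetical):

from string import Template

tpl = Template("var allTests = [\n$test_html_paths\n];\n")
print(tpl.substitute(test_html_paths="  'generated/test_htmls/foo_test.html',"))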
Example #33
def create_values(hostip, port, section, time_key, db):
    dest_dir = "%s/%s" % (base_dir, section)
    create_current_dir(dest_dir)
    check_key = time_key
    out_dict = get_redis_info(hostip, port)
    write_info_disk(out_dict, dest_dir)

    lasttime = return_redis_keys(hostip, port, db, check_key)
    timeout_files = os.path.join(dest_dir, 'sync_timeout')
    if not lasttime or diff_time(lasttime) > 600:
        write_file(timeout_files, lasttime or '1')
    else:
        if os.path.isfile(timeout_files):
            os.remove(timeout_files)

    if int(out_dict['connected_clients']) > 1000:
        redis_connect_high(hostip, port, dest_dir)
Example #34
def main():
    dirs = sys.argv[1]
    expired_values = ['20170227', '20170226', '20170225', '20170224', '20170223']
    expired_dirs = get_expired_list(dirs, expired_values)
    # write the computed expired paths to a log file
    expired_log = "expired.log"
    for dirs in expired_dirs:
        write_file(expired_log, dirs)
    # mv expired directories to another directory
    suffix = "/data/temp/backup/hdfs_backup"
    target = "/data/backups/expired"
    for dirs in expired_dirs:
        target_dirs = dirs.replace(suffix, target)[:-9]
        mv_cmd = "mv %s %s" % (dirs, target_dirs)
        print(mv_cmd)
        # run_cmd(mv_cmd)

    print "all expired files remove"
Example #35
def _gen_test_html(js_file_path: str, template: Template):
    """Generates a Closure test wrapper HTML and saves it to the filesystem."""
    # Generates the test_file_name so that:
    #   ../../javascript/net/grpc/web/grpcwebclientbase_test.js
    # will now be named:
    #   javascript__net__grpc__web__grpcwebclientbase_test.html
    test_file_name = js_file_path
    while test_file_name.startswith('../'):
        test_file_name = test_file_name[3:]
    test_file_name = test_file_name.replace('/', '__')
    test_file_name = os.path.splitext(test_file_name)[0] + '.html'

    # Generates the test HTML using the package name of the test file
    package_name = _extract_closure_package(js_file_path)
    generated_html = template.substitute(package=package_name)

    # Writes the test HTML files
    common.write_file(common.GENERATED_TEST_BASE_PATH + test_file_name,
                      generated_html)
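Tracing the renaming above on the path given in the docstring comment:

import os

name = '../../javascript/net/grpc/web/grpcwebclientbase_test.js'
while name.startswith('../'):
    name = name[3:]
name = name.replace('/', '__')
name = os.path.splitext(name)[0] + '.html'
assert name == 'javascript__net__grpc__web__grpcwebclientbase_test.html'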
Example #36
def update_cmpnodes_res_conf(filename):
    """
    update compute node resource in slurm configuration
    :param filename:
    :return:
    """
    # read slurm.conf
    if not os.path.isfile(filename):
        logger.error("%s is not exists", filename)
        return -1

    cluster_info = get_cluster_info()
    # 1. get compute node resource
    ctl_resource = ""  # the first line in resource.info
    cmp_resource = ""  # the last line in resource.info
    with open(RESOURCE_INFO_FILE, "r") as info:
        lines = info.readlines()
        for line in lines:
            if line:
                if not ctl_resource:
                    ctl_resource = line
                else:
                    cmp_resource = line

    # get modified compute node
    node_list = get_slurm_cmpnode_list(cluster_info)

    buf = StringIO.StringIO()
    with open(filename, 'r') as f:
        for line in f:
            if line.startswith("NodeName=node"):
                # put Slurm Partition conf here
                line = NODE_RESOURCE_FMT.format(node_list, cmp_resource)

            buf.write("\n")
            buf.write(line)

    content = buf.getvalue()
    buf.close()

    write_file(filename, content)
    return 0
Example #37
def start(init_url):
    urls = []
    # init_url = input('specify an area: ')
    url_pattern = re.search(r'-g\d+-', init_url)
    url_pos = url_pattern.start() + len(url_pattern.group(0))
    page_number = find_max_page(common.load_soup_online(init_url))
    print('locations in {} pages.'.format(page_number))
    for idx_page in range(page_number):
        page_url = init_url if idx_page == 0 else ''.join(
                [init_url[:url_pos], 'oa', str(idx_page * 20),
                 '-', init_url[url_pos:]])
        print('[page {}] {}'.format(idx_page + 1, page_url))
        soup = common.load_soup_online(page_url)
        for hotel in soup.findAll('div', class_='geo_name'):
            a = hotel.find('a')
            a = common.TA_ROOT + a['href'][1:]
            urls.append(a)
    urls = list(set(urls))
    print('{} locations found.'.format(len(urls)))
    common.write_file('locations', ';'.join(urls))
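The paging scheme splices an 'oa<offset>' segment into the URL immediately after the '-g<id>-' geo segment, 20 results per page. A quick trace with a made-up URL (the pattern, not a real link):

import re

init_url = 'https://example.com/Hotels-g123-Area-Hotels.html'  # hypothetical
m = re.search(r'-g\d+-', init_url)
pos = m.start() + len(m.group(0))
page_url = ''.join([init_url[:pos], 'oa', str(2 * 20), '-', init_url[pos:]])
# page 3 -> 'https://example.com/Hotels-g123-oa40-Area-Hotels.html'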
Example #38
def run_client_tasks(fb, gid, task):

    # Task 1: Get CA certificate from Facebook
    if(task == '1' or task == '0') :
        print 'Getting CA certificate from Facebook'
        if(get_ca_cert(fb, gid) == -1):
            print 'CA certificate not found for this group, terminating'
            return

        print 'CA certificate retrieved successfully'

    # Task 2: Make certificate request
    if(task == '2' or task == '0'):
        print 'Making certificate request'
        make_cert_request(fb)
        print 'Request made successfully'

    # Loop task 3 until certificate is found
    while(True):

        # Task 3: Check Facebook for certificate
        if(task == '3' or task == '0'):
            print 'Checking Facebook for certificate'
            if(check_cert(fb) == 0):
                print 'Certificate retrieved successfully'
                break
            else:
                print 'Certificate not retrieved'

        # Wait for 60 seconds before checking Facebook again
        sleep(60)

    # Task 4: Configure IPOP and IPSec
    if(task == '4' or task == '0'):
        print 'Configuring virtual private network'
        configure_ipop(gid)
        configure_ipsec()
        common.write_file('/mnt/fd/fb_done', '')
        print '<h2>Virtual private network configured successfully<h2/>'

    return 0
Example #39
def main():
    
    api_key = 'd33a7f5305f6ff7dd4a603f9049f87f3'
    secret = '2b7b550dea45ccea6ada02b0fafb38c5'

    # Create facebook object
    fb = facebook.Facebook(api_key, secret)

    # Get session information
    fb.session_key = sys.argv[1]
    fb.uid = sys.argv[2]
    gid = sys.argv[3]
    node_type = sys.argv[4]

    # Store type to file
    common.write_file('type', node_type)

    # Run CA tasks
    run_ca_tasks(fb, gid, '0')
    
    return
Example #40
 def update(self):
   common.logit("\n--- Privilege Separation is in effect ---\nThe following directories/files are being created to support this.")
   #-- some validation on the condor install ---
   if not os.path.isdir(self.condor_location):
     common.logerr("The CONDOR_LOCATION specified does not exist: %s" % self.condor_location)
   #--- check for Condor switchboard ---
   if not os.path.isfile(self.switchboard_bin):
     common.logerr("Privilege separation binary (%s) does not exist. Do you have the right version of Condor?" % self.switchboard_bin)
   if os.stat(self.switchboard_bin)[stat.ST_UID] != 0:
     common.logerr("Privilege separation binary (%s) must be owned by root!" % self.switchboard_bin)
   #-- create the config file ---
   common.logit("... creating condor config file: %s" % (self.config_file))
   if not os.path.isdir(os.path.dirname(self.config_file)):
     os.mkdir(os.path.dirname(self.config_file))
   common.write_file("w",0644,self.config_file,self.config_data())
   #-- setuid on switchboard ---
   common.logit("... changing permissions on %s to %s" % (self.switchboard_bin,"04755"))
   os.chmod(self.switchboard_bin,04755)
   #-- create factory directories ---
   #-- factory dirs done in Factory install --
   # self.factory.create_factory_dirs(self.factory.username(),0755)
   self.create_factory_client_dirs('root',0755)
   common.logit("--- End of updates for Privilege Separation.--- ")
Example #41
def parse_formula_sheet(sh, output_file):

    print(dicVar)
    all_fun = begin
    for row in range(3, sh.nrows):
        skill_id_str = get_str_from_sheet(sh, row, 0)
        #skill_name_str = get_str_from_sheet(sh, row, 1)
        skill_formula_str = get_str_from_sheet(sh, row, 1)
        print(skill_id_str, skill_formula_str)
        #fun_str = parse_function(skill_formula_str, u"技能ID:"+skill_id_str, u"param_%s" % skill_id_str, dicVar, srcSkillFormla)
        #print( fun_str );

        expSrc = parse_expr_right(skill_formula_str, dicVar, 0, {})

        fun_str = srcSkillFormla % (skill_id_str, skill_id_str,
                                    skill_formula_str, expSrc[1])
        print(fun_str)

        all_fun += fun_str
    write_src(output_file, begin, end, all_fun, "utf-8")

    all_fun += end
    write_file(output_file, all_fun)
Example #42
def find_blog_name_thorough(req_ses, blog_url):  # TODO
    logging.debug(
        'Using slower, more thorough name-finding on {0!r}'.format(blog_url))
    # Extract domain
    # 'http://nsfw.kevinsano.com'
    # 'nsfw.kevinsano.com'
    domain_search = re.search(r'(?:https?://)?([^\\/]+\.\w+)/?', blog_url)
    if domain_search:
        domain = domain_search.group(1)
        logging.debug('domain={0!r}'.format(domain))
    else:
        logging.error('Could not identify domain! Failing.')
        return None
    # Generate RSS feed URL
    blog_rss_url = 'http://{0}/rss'.format(domain)
    logging.debug('blog_rss_url={0!r}'.format(blog_rss_url))
    rss_path = os.path.join('debug',
                            'run_grab_site.find_blog_name_thorough.rss.rss')
    # Load RSS page
    rss_res = common.fetch(
        requests_session=req_ses,
        url=blog_rss_url,
        method='get',
    )
    common.write_file(  # Save to file for debugging
        file_path=rss_path,
        data=rss_res.content)
    # Extract blog name from page
    # '<generator>Tumblr (3.0; @nsfwkevinsano)</generator>'
    # 'nsfwkevinsano'
    name_search = re.search('<generator>[^<]{0,25}@([^)<]+)\)</generator>',
                            rss_res.content)
    if name_search:
        blog_name = name_search.group(1)
    else:
        logging.error('Could not extract blog name from RSS!')
        return None
    logging.debug('blog_name={0!r}'.format(blog_name))
    return blog_name
Example #43
    def set_text_section(self):
        """
        Pull the .text section out.
        """
        if os.path.exists(GDB_OUTPUT):
            os.remove(GDB_OUTPUT) 

        sections_gdb = os.path.join(GDB_SCRIPT_DIR, 'sections.gdb') 
        if not os.path.exists(sections_gdb):
            common.write_file(SECTIONS_GDB, sections_gdb)

        cmd = ['gdb', '--batch', '--command={}'.format(GDB_TEXT_SEC), self.elf_file]
        gdb = subprocess.Popen(cmd, stdout=PIPE)
        gdb.wait()
        gdb.stdout.close()

        gdb_output = common.read_file(GDB_OUTPUT)

        for line in gdb_output: 
            if '.text' in line: 
                line = line.split()
                self.__text_start__ = line[0] # start addr
                self.__text_end__ = line[2] # end addr
                break
Example #44
def load_archive(req_ses, blog_name):
    base_url = 'http://{0}.tumblr.com/archive'.format(blog_name)
    all_history = ''
    # Load first block
    first_response = common.fetch(
        requests_session=req_ses,
        url=base_url,
        method='get',
    )
    common.write_file(file_path=os.path.join('debug', 'first_response.html'),
                      data=first_response.content)
    all_history += first_response.content
    # Find last post date
    last_post_date = find_last_post_date(html=first_response.content)
    # Load subsequent history
    while True:
        # Load next block
        scroll_url = 'http://{0}.tumblr.com/archive?before_time={1}'.format(
            blog_name, last_post_date)
        scroll_response = common.fetch(
            requests_session=req_ses,
            url=scroll_url,
            method='get',
        )
        common.write_file(file_path=os.path.join('debug',
                                                 'scroll_response.html'),
                          data=scroll_response.content)
        all_history += scroll_response.content
        # Find last post date
        last_post_date = find_last_post_date(html=scroll_response.content)
        # Stop if no more posts
        if not last_post_date:
            break

    # Store combined page
    common.write_file(file_path=os.path.join('debug', 'all_history.html'),
                      data=all_history)
    return all_history
Example #45
def configure_ipop(gid):

    # Create files based on user input
    dhcpdata = common.read_file('configs/dhcpdata.conf')
    dhcpdata = dhcpdata.replace('UFGrid00', gid)

    # Files are saved to floppy to maintain compatibility with appliance
    common.write_file('/mnt/fd/dhcpdata.conf', dhcpdata)
    common.write_file('/mnt/fd/ipop_ns', gid)
    common.write_file('/usr/local/ipop/var/ipop_ns', gid)

    # Create ipop config file
    create_ipop_config('/usr/local/ipop/var/ipop.config',gid)

    # Define the type of server created by user
    os.system('cp type /mnt/fd/type')

    # Update the node config file
    os.system('cp /mnt/fd/node.config /usr/local/ipop/var/node.config')

    # Add racoon to ipop restart
    os.system('echo \'/etc/init.d/racoon restart\' >> /etc/init.d/ipop.sh')

    return 0
Example #46
    return dict_out

def merge_dicts(dict_list_id_sorted):
    output = []
    pre_dict_item = dict_list_id_sorted[0]
    same_id_term = [dict_per_ad(pre_dict_item)]
    for dict_item in dict_list_id_sorted[1:]:
        if dict_item["uid"] == pre_dict_item["uid"]:
            same_id_term.append(dict_per_ad(dict_item))
        else:
            output.append({"uid": pre_dict_item["uid"], "name": pre_dict_item["name"], "former_names": pre_dict_item["former_names"], "each_term": same_id_term})
            same_id_term = [dict_per_ad(dict_item)]
        pre_dict_item = dict_item
    output.append({"uid": pre_dict_item["uid"], "name": pre_dict_item["name"], "former_names": pre_dict_item["former_names"], "each_term": same_id_term})
    return output

objs = json.load(open('../data/npl_ly.json'))
for npl_legislator in objs:
    common.normalize_name(npl_legislator)
    if not npl_legislator.get('elected_party'):
        npl_legislator['elected_party'] = npl_legislator['party']
dump_data = json.dumps(objs, sort_keys=True, ensure_ascii=False)
common.write_file(dump_data, '../data/npl_ly.json')
dump_data = json.dumps(objs, sort_keys=True, indent=4, ensure_ascii=False)
common.write_file(dump_data, '../data/pretty_format/npl_ly.json')
merged_npl = merge_dicts(sorted(objs, key=lambda d: [d["uid"], d['ad']]))
dump_data = json.dumps(merged_npl, sort_keys=True, indent=4, ensure_ascii=False)
common.write_file(dump_data, '../data/pretty_format/npl_ly(same_id_in_one_dict).json')
dump_data = json.dumps(merged_npl, sort_keys=True)
common.write_file(dump_data, '../data/npl_ly(same_id_in_one_dict).json')
Example #47
def write_info_disk(data_d, dest_dir):
    for key in data_d.keys():
        filename = os.path.join(dest_dir, key)
        write_file(filename, str(data_d[key]).strip())
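Paired with get_redis_info from Example #33, this fans each Redis INFO field out into its own file under dest_dir. A hypothetical call:

write_info_disk({'connected_clients': '42', 'role': 'master'}, '/tmp/redis-status')
# -> /tmp/redis-status/connected_clients containing "42",
#    /tmp/redis-status/role containing "master"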
Example #48
def run():
    """ create trafodion user, bashrc, setup passwordless SSH """
    dbcfgs = json.loads(dbcfgs_json)

    distro = dbcfgs['distro']
    if 'CDH' in distro:
        hadoop_type = 'cloudera'
    elif 'HDP' in distro:
        hadoop_type = 'hortonworks'
    elif 'APACHE' in distro:
        hadoop_type = 'apache'

    home_dir = get_default_home()
    # customize trafodion home dir
    if dbcfgs.has_key('home_dir') and dbcfgs['home_dir']:
        home_dir = dbcfgs['home_dir']

    traf_user = dbcfgs['traf_user']
    traf_user_dir = '%s/%s' % (home_dir, traf_user)
    traf_dirname = dbcfgs['traf_dirname']
    traf_home = '%s/%s' % (traf_user_dir, traf_dirname)

    hbase_xml_file = dbcfgs['hbase_xml_file']
    auth_key_file = '%s/.ssh/authorized_keys' % traf_user_dir
    ssh_cfg_file = '%s/.ssh/config' % traf_user_dir
    ulimits_file = '/etc/security/limits.d/%s.conf' % traf_user

    # create trafodion user and group
    if cmd_output('getent passwd %s' % traf_user):
        # trafodion user exists, set actual trafodion group
        traf_group = cmd_output('id -ng %s' % traf_user)
    else:
        # default trafodion group
        traf_group = traf_user
        if not cmd_output('getent group %s' % traf_group):
            run_cmd('groupadd %s > /dev/null 2>&1' % traf_group)
        traf_pwd = dbcfgs['traf_pwd']
        run_cmd('useradd --shell /bin/bash -m %s -g %s --home %s --password "$(openssl passwd %s)"' % (traf_user, traf_group, traf_user_dir, traf_pwd))
    # hbase group is generally either hbase or hadoop, depending on distro
    if cmd_output('getent group hbase'):
        cmd_output('/usr/sbin/usermod -a -G hbase %s' % traf_user)
    if cmd_output('getent group hadoop'):
        cmd_output('/usr/sbin/usermod -a -G hadoop %s' % traf_user)
    if cmd_output('getent group hive'):
        cmd_output('/usr/sbin/usermod -a -G hive %s' % traf_user)

    if not os.path.exists(traf_user_dir):
        run_cmd('mkdir -p %s' % traf_user_dir)
        run_cmd('chmod 700 %s' % traf_user_dir)

    # set ssh key
    run_cmd_as_user(traf_user, 'echo -e "y" | ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa')
    # the key is generated in copy_file script running on the installer node
    run_cmd('cp %s{,.pub} %s/.ssh/' % (SSHKEY_FILE, traf_user_dir))

    run_cmd_as_user(traf_user, 'cat ~/.ssh/id_rsa.pub > %s' % auth_key_file)
    run_cmd('chmod 644 %s' % auth_key_file)

    ssh_cfg = 'StrictHostKeyChecking=no\nNoHostAuthenticationForLocalhost=yes\n'
    with open(ssh_cfg_file, 'w') as f:
        f.write(ssh_cfg)
    run_cmd('chmod 600 %s' % ssh_cfg_file)

    run_cmd('chown -R %s:%s %s/.ssh/' % (traf_user, traf_group, traf_user_dir))

    hb = ParseXML(hbase_xml_file)
    zk_nodes = hb.get_property('hbase.zookeeper.quorum')
    zk_port = hb.get_property('hbase.zookeeper.property.clientPort')
    # set trafodion_config
    nodes = dbcfgs['node_list'].split(',')
    trafodion_config = """
export TRAF_HOME="%s"
export TRAF_VAR=$TRAF_HOME/tmp
export MY_SQROOT=$TRAF_HOME # for compatibility
export JAVA_HOME="%s"
export node_count="%s"
export HADOOP_TYPE="%s"
export ENABLE_HA="%s"
export ZOOKEEPER_NODES="%s"
export ZOOKEEPER_PORT="%s"
export SECURE_HADOOP="%s"
export CLUSTERNAME="%s"
""" % (traf_home, dbcfgs['java_home'], str(len(nodes)), hadoop_type, dbcfgs['enable_ha'],
       zk_nodes, zk_port, dbcfgs['secure_hadoop'], socket.gethostname())

    # save additional configs for elastic
    trafodion_config += """
export hbase_xml_file="%s"
export hbase_lib_path="%s"
export traf_user="******"
export traf_version="%s"
export dcs_cnt_per_node="%s"
""" % (dbcfgs['hbase_xml_file'], dbcfgs['hbase_lib_path'], dbcfgs['traf_user'], dbcfgs['traf_version'], dbcfgs['dcs_cnt_per_node'])

    run_cmd('mkdir -p %s' % TRAF_CFG_DIR)
    write_file(TRAF_CFG_FILE, trafodion_config)

    if 'APACHE' in distro:
        extra_config = """
export HADOOP_PREFIX=%s
export HBASE_HOME=%s
export HIVE_HOME=%s
export PATH=$PATH:$HADOOP_PREFIX/bin:$HADOOP_PREFIX/sbin:$HBASE_HOME/bin
        """ % (dbcfgs['hadoop_home'], dbcfgs['hbase_home'], dbcfgs['hive_home'])
        append_file(TRAFODION_CFG_FILE, extra_config)

    # set permission
    run_cmd('chown -R %s:%s %s*' % (traf_user, traf_group, TRAF_CFG_DIR))

    # set ulimits for trafodion user
    ulimits_config = '''
# Trafodion settings
%s   soft   core unlimited
%s   hard   core unlimited
%s   soft   memlock unlimited
%s   hard   memlock unlimited
%s   soft   nofile 32768
%s   hard   nofile 65536
%s   soft   nproc 100000
%s   hard   nproc 100000
%s   soft nofile 8192
%s   hard nofile 65535
''' % ((traf_user,) * 10)

    write_file(ulimits_file, ulimits_config)

    # change permission for hsperfdata
    if os.path.exists(TRAF_HSPERFDATA_FILE):
        run_cmd('chown -R %s:%s %s' % (traf_user, traf_group, TRAF_HSPERFDATA_FILE))

    # clean up unused key file at the last step
    run_cmd('rm -rf %s{,.pub}' % SSHKEY_FILE)

    print 'Setup trafodion user successfully!'
Example #49
#!/usr/bin/python
# -*- coding: utf-8 -*
import json
import common


objs = json.load(open('../data/ly_info.json'))
for ly_legislator in objs:
    common.normalize_name(ly_legislator)
dump_data = json.dumps(objs)
common.write_file(dump_data, '../data/ly_info.json')
dump_data = json.dumps(objs, sort_keys=True, indent=4, ensure_ascii=False)
common.write_file(dump_data, '../data/pretty_format/ly_info.json')
empty_term_start = [(legislator["ad"], legislator["name"], legislator["links"]["ly"]) for legislator in objs if not legislator.has_key("term_start")]
dump_data = json.dumps(empty_term_start, sort_keys=True, indent=4, ensure_ascii=False)
common.write_file(dump_data, '../log/term_start_empty_on_lygovtw.json')
Example #50
            if compare[key] != base[key]:
                f.write('key, %s, (ly.gov.tw), %s, (npl), %s, uid, %s, ad, %s, name, %s, links, %s\n' % (key, compare[key], base[key], base["uid"], base["ad"], base["name"], compare["links"]["ly"]))
        else:
            f.write('can not find key: %s\n' % key)

ly_dict_list = json.load(open('../data/%d/ly_info.json' % args.ad))
npl_dict_list = json.load(open('../data/%d/npl_ly.json' % args.ad))
for source in [ly_dict_list, npl_dict_list]:
    for legislator in source:
        common.normalize_name(legislator)
for npl_legislator in npl_dict_list:
    names_list = [npl_legislator["name"]]
    for name in npl_legislator.get("former_names", []):
        names_list.append(name)
    ly_legislator = find_legislator_from_ly_info(names_list, npl_legislator, ly_dict_list)
    if ly_legislator:
        term = complement(ly_legislator, npl_legislator)
# --> cross check data conflict
f = codecs.open('../log/conflict.txt','w', encoding='utf-8')
for ly_legislator in ly_dict_list:
    npl_legislator = find_legislator_from_npl(ly_legislator, npl_dict_list)
    if npl_legislator:
        conflict(ly_legislator, npl_legislator, f)
f.close()
# <-- end

dump_data = json.dumps(npl_dict_list, sort_keys=True, ensure_ascii=False)
common.write_file(dump_data, '../data/%d/merged.json' % args.ad)
dump_data = json.dumps(npl_dict_list, sort_keys=True, indent=4, ensure_ascii=False)
common.write_file(dump_data, '../data/pretty_format/%d/merged.json' % args.ad)
Example #51
 def create_config(self):
   config_xml = self.config_data()
   common.logit("\nCreating configuration file: %s" % self.config_file())
   common.make_directory(self.config_dir(),self.username(),0755)
   common.write_file("w",0644,self.config_file(),config_xml)
Example #52
def parse_sailor_sheet(sh, dicVar, output_path):
    startRow = get_int_from_sheet(sh, 3, 1) - 1
    headRow = startRow - 1
    print("startRow = %d" % startRow)
    print("headRow = %d" % headRow)

    # if new columns are added to the sheet, this variable must be updated
    #endCol = 27
    endCol = sh.ncols

    for row in range(startRow, sh.nrows):
        baseSrcLst = []
        fightValueSrcLst = []
        sailorTmpSrcLst = []
        sailorBaseSrcLst = []
        sailorTrainSrcLst = []
        upstarUpgradeSailorLst = []
        upgradeSailorLst = []
        sailorInvestLst = []
        attrLst = []
        reviseAttrLst = []
        commonLst = []
        sailorId = get_str_from_sheet(sh, row, 0)
        for col in range(0, endCol):
            head = get_str_from_sheet(sh, headRow, col)
            if (len(head) == 0):
                continue
            data = get_str_from_sheet(sh, row, col)
            if (len(data) == 0):
                continue

            #print( "[%s]=%s"%(head, data) )
            if (head == u"formula"):
                note = get_str_from_sheet(sh, headRow - 2, col)
                # TODO: split the formula at '=' to get the lvalue and check which type of variable it is
                eqCharIdx = data.find(u"=")
                lrSplit = re.compile(u'[==]{1}')

                if eqCharIdx != -1:
                    lvalue = data[0:eqCharIdx]
                    lvalue = lvalue.strip()

                src = parse_function(data, note, "", dicVar, "\t// %s%s%s")
                print(src)
                if dicVar[lvalue]["var_type"] == u"战斗数值":
                    fightValueSrcLst.append(src)
                elif dicVar[lvalue]["var_type"] == u"水手临时数值":
                    sailorTmpSrcLst.append(src)
                elif dicVar[lvalue]["var_type"] == u"船只基础数值":
                    baseSrcLst.append(src)
                elif dicVar[lvalue]["var_type"] == u"水手基础属性值":
                    sailorBaseSrcLst.append(src)
                elif dicVar[lvalue]["var_type"] == u"水手洗练属性值":
                    sailorTrainSrcLst.append(src)
                elif dicVar[lvalue]["var_type"] == u"水手升星属性值":
                    upstarUpgradeSailorLst.append(src)
                elif dicVar[lvalue]["var_type"] == u"水手升级属性值":
                    upgradeSailorLst.append(src)
                elif dicVar[lvalue]["var_type"] == u"水手投资获得经验值":
                    sailorInvestLst.append(src)
                elif dicVar[lvalue]["var_type"] == u"公用":
                    commonLst.append(src)
                continue

            tmp = headExp.match(head)
            if tmp is None:
                continue

            var_name = tmp.group("var_name")
            var_type = tmp.group("var_type")

            if var_type == "int":
                var = int(data)
                srcAttr = attrSrc % (var_name, PythonData2Lpc(var))
                attrLst.append(srcAttr)
            if var_type == "float":
                var = float(data)
                srcAttr = attrSrc % (var_name, PythonData2Lpc(var))
                attrLst.append(srcAttr)
            if var_type == "string":
                var = data
                srcAttr = attrSrc % (var_name, PythonData2Lpc(var))
                attrLst.append(srcAttr)
            if var_type == "array":
                var = []
                tmpLst = data.split(",")
                for tmp in tmpLst:
                    var.append(int(tmp))
                srcAttr = attrSrc % (var_name, PythonData2Lpc(var))
                attrLst.append(srcAttr)
            if var_type == "special":
                var = []
                tmpLst = data.split(";")
                for st in tmpLst:
                    tm = st.split(",")
                    tmp1 = []
                    for tmp in tm:
                        tmp1.append(int(tmp))
                    var.append(tmp1)
                srcAttr = attrSrc % (var_name, PythonData2Lpc(var))
                attrLst.append(srcAttr)

            if var_type == "intermediate":
                #reviseVar = int(data)/100.0
                reviseVar = int(data)
                reviseAttr = attrReviseSrc % (var_name,
                                              PythonData2Lpc(reviseVar))
                reviseAttrLst.append(reviseAttr)

            if var_type == "dir":
                var = []
                tmpLst = data.split(";")
                for st in tmpLst:
                    tm = st.split(",")
                    tmp1 = []
                    for tmp in tm:
                        tmp1.append(tmp)
                    var.append(tmp1)
                srcAttr = attrSrc % (var_name, PythonData2Lpc(var))
                attrLst.append(srcAttr)

            if var_type == "dir_map":
                var = []
                tmpLst = data.split(";")
                for st in tmpLst:
                    tm = st.split(",")
                    tmp1 = []
                    for tmp in tm:
                        tmp1.append(int(tmp))
                    var.append(tmp1)
                srcAttr = attrSrc % (var_name, PythonData2Lpc(var))
                attrLst.append(srcAttr)
            if var_type == "map":
                var = parse_map(data)
                srcAttr = attrSrc % (var_name, PythonData2Lpc(var))
                attrLst.append(srcAttr)

        if sailorId != "":
            filename = "%s/%s.c" % (output_path, sailorId)
            filecontent = srcSailorTemplete % (
                u"\n".join(attrLst), u"\n".join(reviseAttrLst),
                u"\n".join(sailorInvestLst), u"\n".join(commonLst),
                u"\n".join(sailorBaseSrcLst), u"\n".join(sailorTrainSrcLst),
                u"\n".join(upstarUpgradeSailorLst),
                u"\n".join(upgradeSailorLst), u"\n".join(sailorTmpSrcLst),
                u"\n".join(baseSrcLst), u"\n".join(fightValueSrcLst))

            print(filecontent)
            try:
                src_data = open(filename, "rb").read()
                if len(src_data) == 0:
                    src_data = begin + "\n\n" + end
            except IOError:
                src_data = begin + "\n\n" + end

            tmp = p.sub(begin + "\n" + filecontent + "\n" + end, src_data)

            write_file(filename, tmp)
Example #53
                f.write('key, %s, (ly.gov.tw), %s, (npl), %s, uid, %s, ad, %s, name, %s, links, %s\n' % (key, compare[key], base[key], base["uid"], base["ad"], base["name"], compare["links"]["ly"]))
        else:
            f.write('can not find key: %s\n' % key)

ly_dict_list = json.load(open('../data/ly_info.json'))
npl_dict_list = json.load(open('../data/npl_ly(same_id_in_one_dict).json'))
origin_npl_dict_list = json.load(open('../data/npl_ly.json'))
for npl_legislator in npl_dict_list:
    names_list = [npl_legislator["name"]]
    for name in npl_legislator.get("former_names", []):
        names_list.append(name)
    for term in npl_legislator["each_term"]:
        # ly.gov.tw doesn't have legislator information for ad = 1
        if term["ad"] != 1:
            ly_legislator = find_legislator_from_ly_info(names_list, term, ly_dict_list)
            if ly_legislator:
                term = complement(ly_legislator, term)
# --> cross check data conflict
f = codecs.open('../log/conflict.txt','w', encoding='utf-8')
for ly_legislator in ly_dict_list:
    npl_legislator = find_legislator_from_npl(ly_legislator, origin_npl_dict_list)
    if npl_legislator:
        conflict(ly_legislator, npl_legislator, f)
f.close()
# <-- end

dump_data = json.dumps(npl_dict_list, sort_keys=True, ensure_ascii=False)
common.write_file(dump_data, '../data/merged.json')
dump_data = json.dumps(npl_dict_list, sort_keys=True, indent=4, ensure_ascii=False)
common.write_file(dump_data, '../data/pretty_format/merged.json')
Example #54
    _log('Please check config.py')
    exit()
_log('Config OK')
device_id = _get_id(c.id_file)
if not (device_id or cfg.device_id):
    first_device_id = c.device_id()
    second_device_id = c.device_id()
    if first_device_id == second_device_id:
        cfg.device_id = first_device_id
    else:
        _device_id_ = c.random_hex16()
    _log('New Device Id: %s' % cfg.device_id)
else:
    if device_id:
        cfg.device_id = device_id
c.write_file(cfg.device_id, c.id_file, _log)

if cfg.stream_quality == 'PC':
    _quality_ = 'PC'
else:
    _quality_ = 'MOBILE'

_o2tvgo_ = O2TVGO(cfg.device_id, cfg.username, cfg.password, _quality_, _log)

if cfg.playlist_type == 3:
    c.write_streamer(os.path.join(cfg.playlist_path, cfg.playlist_streamer),
                     os.path.join(cfg.playlist_path, cfg.playlist_src),
                     cfg.ffmpeg_command, _log)

code, num, err = channel_playlist()
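The first-run device-id logic above (generate twice, keep the value only if generation is stable, otherwise fall back to a random id) could be wrapped in one helper. A sketch under the same assumptions, reusing _get_id, c.device_id, c.random_hex16, and c.write_file exactly as they are used above:

def ensure_device_id():
    # Prefer a previously persisted id, then a stable generated one,
    # then a random fallback, and persist whatever was chosen.
    saved = _get_id(c.id_file)
    if saved:
        cfg.device_id = saved
    elif not cfg.device_id:
        first, second = c.device_id(), c.device_id()
        cfg.device_id = first if first == second else c.random_hex16()
        _log('New Device Id: %s' % cfg.device_id)
    c.write_file(cfg.device_id, c.id_file, _log)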
Example #55
0
def run():
    dbcfgs = json.loads(dbcfgs_json)

    if not os.path.exists(dbcfgs['hbase_lib_path']):
        err('Cannot find HBase lib folder')
    if not os.path.exists(dbcfgs['java_home']):
        err('Cannot find Java, please set the JAVA_HOME on the new nodes to: %s'
            % dbcfgs['java_home'])

    home_dir = get_default_home()
    if 'home_dir' in dbcfgs:
        home_dir = dbcfgs['home_dir']

    traf_user = dbcfgs['traf_user']
    traf_home = dbcfgs['traf_home']
    traf_user_dir = '%s/%s' % (home_dir, traf_user)

    traf_ver = dbcfgs['traf_version']
    #    scratch_locs = dbcfgs['scratch_locs'].split(',')

    SUDOER_CFG = """
## Allow trafodion id to run commands needed for backup and restore
%%%s ALL =(hbase) NOPASSWD: %s/bin/hbase"
""" % (traf_user, DEF_HBASE_HOME)

    ### add trafodion user ###
    # create trafodion user and group
    if cmd_output('getent passwd %s' % traf_user):
        print 'user [%s] exists' % traf_user
        # trafodion user exists, set actual trafodion group
        traf_group = cmd_output('id -ng %s' % traf_user)
    else:
        # default trafodion group
        traf_group = traf_user
        if not cmd_output('getent group %s' % traf_group):
            run_cmd('groupadd %s' % traf_group)
        traf_shadow = dbcfgs['traf_shadow']
        print 'Adding user [%s]' % traf_user
        run_cmd(
            'useradd --shell /bin/bash -m %s -g %s --home %s --password "%s"' %
            (traf_user, traf_group, traf_user_dir, traf_shadow))
        print 'Added user [%s]' % traf_user

    if not os.path.exists(traf_user_dir):
        run_cmd('mkdir -p %s' % traf_user_dir)
        run_cmd('chmod 700 %s' % traf_user_dir)

    ### untar the copied trafodion binaries ###
    TRAF_PKG_FILE = '/tmp/traf_bin.tar.gz'
    run_cmd('mkdir -p %s' % traf_home)
    run_cmd('mkdir -p /etc/trafodion')
    run_cmd('tar xf %s -C %s' % (TRAF_PKG_FILE, traf_home))

    run_cmd('mv -f /tmp/trafodion_config %s' % TRAF_CFG_FILE)
    run_cmd('cp -rf /tmp/.ssh %s/..' % traf_home)
    run_cmd('mv -f /tmp/hbase-trx-* %s' % dbcfgs['hbase_lib_path'])
    run_cmd('mv -f /tmp/trafodion-utility-* %s' % dbcfgs['hbase_lib_path'])

    ### copy trafodion bashrc ###
    bashrc_template = '%s/sysinstall/home/trafodion/.bashrc' % traf_home
    bashrc_file = '%s/%s/.bashrc' % (home_dir, traf_user)
    # backup orig bashrc
    if os.path.exists(bashrc_file):
        run_cmd('cp -f %s %s.bak' % (bashrc_file, bashrc_file))
    run_cmd('cp -f %s %s' % (bashrc_template, bashrc_file))

    # set permission
    run_cmd('chmod 700 %s/../.ssh' % traf_home)
    cmd_output('chmod 600 %s/../.ssh/{id_rsa,config,authorized_keys}' %
               traf_home)
    run_cmd('chmod 777 %s' % TRAF_CFG_FILE)
    run_cmd('chown -R %s:%s /etc/trafodion' % (traf_user, traf_group))
    run_cmd('chmod +r %s/{hbase-trx-*,trafodion-utility-*}' %
            dbcfgs['hbase_lib_path'])
    run_cmd('chown -R %s:%s %s' % (traf_user, traf_group, traf_user_dir))

    ### modify CLUSTERNAME ###
    mod_file(TRAF_CFG_FILE,
             {'CLUSTERNAME=.*': 'CLUSTERNAME=%s' % socket.gethostname()})

    ### kernel settings ###
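    # note: these plain appends add duplicate entries if the script is re-run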
    run_cmd('echo "kernel.pid_max=65535" >> /etc/sysctl.conf')
    run_cmd('echo "kernel.msgmnb=65536" >> /etc/sysctl.conf')
    run_cmd('echo "kernel.msgmax=65536" >> /etc/sysctl.conf')
    run_cmd('/sbin/sysctl -p /etc/sysctl.conf 2>&1 > /dev/null')

    ### copy init script ###
    init_script = '%s/sysinstall/etc/init.d/trafodion' % traf_home
    if os.path.exists(init_script):
        run_cmd('cp -rf %s /etc/init.d/' % init_script)
        run_cmd('chkconfig --add trafodion')
        run_cmd('chkconfig --level 06 trafodion on')

    ### create and set permission for scratch file dir ###
#    for loc in scratch_locs:
#        # don't set permission for HOME folder
#        if not os.path.exists(loc):
#            run_cmd('mkdir -p %s' % loc)
#        if home_dir not in loc:
#            run_cmd('chmod 777 %s' % loc)

    if dbcfgs['enable_ha'] == 'true':
        # set trafodion sudoer file for specific cmds
        SUDOER_CFG += """
## Trafodion Floating IP commands
Cmnd_Alias IP = /sbin/ip
Cmnd_Alias ARP = /sbin/arping

## Allow Trafodion id to run commands needed to configure floating IP
%%%s ALL = NOPASSWD: IP, ARP
""" % traf_user

    ### write trafodion sudoer file ###
    with open(TRAF_SUDOER_FILE, 'w') as f:
        f.write(SUDOER_CFG)

    # set ulimits for trafodion user
    ulimits_file = '/etc/security/limits.d/%s.conf' % traf_user
    ulimits_config = '''
# Trafodion settings
%s   soft   core unlimited
%s   hard   core unlimited
%s   soft   memlock unlimited
%s   hard   memlock unlimited
%s   soft   nofile 32768
%s   hard   nofile 65536
%s   soft   nproc 100000
%s   hard   nproc 100000
''' % ((traf_user, ) * 8)

    write_file(ulimits_file, ulimits_config)
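write_file here is a project-local helper whose definition is not part of this excerpt; a plausible minimal shape, stated purely as an assumption:

def write_file(path, content):
    # Assumed semantics: overwrite path with content. The real helper
    # may differ (append, permissions, logging).
    with open(path, 'w') as f:
        f.write(content)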
Example #56
0
def create_config(self, config_xml):
    common.logit("\nCreating configuration files")
    common.logit("   %s" % self.config_file())
    common.make_directory(self.config_dir(), self.username(), 0755)
    common.write_file("w", 0644, self.config_file(), config_xml, SILENT=True)
Example #57
0
def write_file_by_dict(filename, dict_values):
    for key, value in dict_values.items():
        msg = "%s#&#%s" % (key, value)
        write_file(filename, msg)
    return True
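A hypothetical usage, assuming write_file(filename, msg) appends one line per call (its definition is not shown here):

settings = {"host": "127.0.0.1", "port": 8080}
write_file_by_dict("settings.txt", settings)
# settings.txt would then contain lines like:
#   host#&#127.0.0.1
#   port#&#8080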
Example #58
0
#!/usr/bin/python
# -*- coding: utf-8 -*-
import json
import common

objs = json.load(open('../data/ly_info.json'))
for ly_legislator in objs:
    common.normalize_name(ly_legislator)
dump_data = json.dumps(objs, sort_keys=True, ensure_ascii=False)
common.write_file(dump_data, '../data/ly_info.json')
dump_data = json.dumps(objs, sort_keys=True, indent=4, ensure_ascii=False)
common.write_file(dump_data, '../data/pretty_format/ly_info.json')
empty_term_start = [(legislator["ad"], legislator["name"],
                     legislator["links"]["ly"]) for legislator in objs
                    if not legislator.has_key("term_start")]
dump_data = json.dumps(empty_term_start,
                       sort_keys=True,
                       indent=4,
                       ensure_ascii=False)
common.write_file(dump_data, '../log/term_start_empty_on_lygovtw.json')
Example #59
0
        url = a['href']
        desc = li.find('li', {"class": "font-balck"}).text
        # time = li.find('li', {"class": "publish"}).text
        h3 = li.findAll('h3')
        name = h3[0].text.strip()
        price = h3[1].text
        url = common_url + url
        if rental not in name:
            continue
        print url
        print name
        print desc
        print price
        urls.append(name + ' | ' + desc + ' | ' + price + ' | ' + url)
    return urls


def get_house_list():
    house_list = []
    rentals = get_community_list()
    for rental in rentals:
        house_list = get_woaiwojia_url(house_list, rental)
    return house_list


if __name__ == '__main__':
    urls = get_house_list()
    write_file(urls, 'woaiwojia.txt')
    get_woaiwojia_url([], '')
    print 'over'
Example #60
0
        a = h3.findAll('a')[-1]
        url = a['href']
        name = a.text
        data_addr = h3.find('b')['data-addr'].replace('\n', '')
        data_addr = str(data_addr)
        data_addr = data_addr.split(" ")[0]
        price = box.find('p', {"class": "p1"})
        price = price.find('span', {"class": "en"}).text
        if rental not in data_addr:
            continue
        print url
        print data_addr
        print name
        print price
        urls.append(name + ' | ' + price + ' | ' + data_addr + ' | ' + common_url + url)
    return urls


def get_house_list():
    house_list = []
    rentals = get_community_list()
    for rental in rentals:
        house_list = get_ziru_url(house_list, rental)
    return house_list


if __name__ == '__main__':
    urls = get_house_list()
    write_file(urls, 'ziru.txt')
    print 'over'