Example #1
def main():
    if len(sys.argv) < 2:
        usage_and_exit()
    #print("top_dir: '%s'" % get_top_dir())
    ensure_7z_exists()
    conf = util.load_config()
    cert_pwd = conf.GetCertPwdMustExist()
    s3.set_secrets(conf.aws_access, conf.aws_secret)
    s3.set_bucket("kjkpub")

    ver = sys.argv[1]
    #print("ver: '%s'" % ver)
    svn_url = "https://sumatrapdf.googlecode.com/svn/tags/%srel" % ver
    src_dir_name = "SumatraPDF-%s-src" % ver
    archive_name = src_dir_name + ".7z"
    s3_path = "sumatrapdf/rel/" + archive_name
    print("svn_url: '%s'\ndir_name: '%s'\narchive_name: %s\ns3_path: %s" % (svn_url, src_dir_name, archive_name, s3_path))
    s3.verify_doesnt_exist(s3_path)

    os.chdir(get_top_dir())
    util.run_cmd_throw("svn", "export", svn_url, src_dir_name)
    util.run_cmd_throw("7z", "a", "-r", archive_name, src_dir_name)
    s3.upload_file_public(archive_name, s3_path)
    shutil.rmtree(src_dir_name)
    os.remove(archive_name)
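Note: most of the snippets on this page only call util.load_config() and then read attributes such as aws_access, aws_secret and trans_ul_secret, or call helpers like GetCertPwdMustExist() and HasNotifierEmail(). The loader itself is never shown here; the following is only a rough sketch of what such a helper could look like, with every attribute and method name inferred from the call sites in these examples and the conf.py module being an assumption.

# Hypothetical sketch only -- not the real util.load_config(); names are
# inferred from how the examples on this page use the returned object.
class Config(object):
    def __init__(self, module):
        self.aws_access = getattr(module, "aws_access", None)
        self.aws_secret = getattr(module, "aws_secret", None)
        self.trans_ul_secret = getattr(module, "trans_ul_secret", None)
        self.notifier_email = getattr(module, "notifier_email", None)
        self.notifier_email_pwd = getattr(module, "notifier_email_pwd", None)
        self.cert_pwd = getattr(module, "cert_pwd", None)

    def HasNotifierEmail(self):
        return bool(self.notifier_email and self.notifier_email_pwd)

    def GetNotifierEmailAndPwdMustExist(self):
        assert self.HasNotifierEmail()
        return (self.notifier_email, self.notifier_email_pwd)

    def GetCertPwdMustExist(self):
        assert self.cert_pwd is not None
        return self.cert_pwd


def load_config():
    import conf  # assumed: a local conf.py holding the secrets, kept out of version control
    return Config(conf)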
Example #2
def fix():
	verify_started_in_right_directory()
	conf = load_config()
	s3.set_secrets(conf.aws_access, conf.aws_secret)
	s3.set_bucket("kjkpub")

	d = get_stats_cache_dir()
	files = os.listdir(d)
	all_vers = [stats_txt_name_to_svn_no(f) for f in files]
	all_vers_s3 = get_s3_vers()

	get_s3_files()
	for ver in all_vers_s3:
		if not valid_s3_ver(ver):
			fix_from_ver(ver, all_vers, all_vers_s3)

	prev_ver = all_vers[0]
	to_check = all_vers[1:-1]
	for ver in to_check:
		if ver != prev_ver + 1:
			missing_ver = prev_ver + 1
			print("missing ver %d" % missing_ver)
			fix_from_ver(missing_ver, all_vers, all_vers_s3)
			return
		prev_ver = ver
	print("All are ok!")
Example #3
def main():
    url_update = "http://kjkpub.s3.amazonaws.com/sumatrapdf/sumpdf-update.txt"
    url_latest = "http://kjkpub.s3.amazonaws.com/sumatrapdf/sumpdf-latest.txt"

    conf = load_config()
    assert conf.aws_access != "" and conf.aws_secret != ""
    s3.set_secrets(conf.aws_access, conf.aws_secret)
    s3.set_bucket("kjkpub")

    v1 = get_latest_version(url_latest)
    (v2, ver) = get_update_versions(url_update)
    validate_ver(ver)
    assert not v2 or v1 == v2, "sumpdf-update.txt and sumpdf-latest.txt don't agree on Stable version, run build.py -release first"
    verify_version_not_lower(ver, v1, v2)
    sys.stdout.write("Going to update auto-update version to %s. Are you sure? [y/N] " % ver)
    sys.stdout.flush()
    ch = getch()
    print()
    if ch not in ['y', 'Y']:
        print("Didn't update because you didn't press 'y'")
        sys.exit(1)

    # remove the Stable version from sumpdf-update.txt
    s = "[SumatraPDF]\nLatest %s\n" % ver
    s3.upload_data_public(s, "sumatrapdf/sumpdf-update.txt")
    # keep updating the legacy file for now
    s = "%s\n" % ver
    s3.upload_data_public(s, "sumatrapdf/sumpdf-latest.txt")
    v1 = get_latest_version(url_latest)
    (v2, v3) = get_update_versions(url_update)
    if v1 != ver or v2 != None or v3 != ver:
        print("Upload failed because v1 or v3 != ver ('%s' or '%s' != '%s'" % (v1, v3, ver))
        sys.exit(1)
    print("Successfully update auto-update version to '%s'" % ver)
Example #4
def email_msg(msg):
    c = load_config()
    if not c.HasNotifierEmail():
        print("email_build_failed() not ran because not c.HasNotifierEmail()")
        return
    sender, senderpwd = c.GetNotifierEmailAndPwdMustExist()
    subject = "SumatraPDF buildbot failed"
    util.sendmail(sender, senderpwd, ["*****@*****.**"], subject, msg)
Example #5
    def _init_user_db(self, filename=USER_DB_FILE):
        raw_user_list = load_config(filename)['users']
        for raw_user in raw_user_list:
            username, password, credit = raw_user
            self._add_user(
                dict(username=username,
                     password=password),
                credit)
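The loader above expects load_config(filename) to return a mapping whose 'users' entry is a list of (username, password, credit) triples. For reference, a parsed config of that shape might look like the sketch below; the values and the file layout are purely illustrative assumptions.

# Hypothetical parsed contents of USER_DB_FILE; only the shape matters here.
user_db = {
    'users': [
        ('alice', 'alice-password', 100),
        ('bob', 'bob-password', 50),
    ],
}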
Example #6
def main(models, source_file, nbest_file, saveto, b=80,
         normalize=False, verbose=False, alignweights=False):

    # load model model_options
    options = []
    for model in models:
        options.append(load_config(model))

        fill_options(options[-1])

    rescore_model(source_file, nbest_file, saveto, models, options, b, normalize, verbose, alignweights)
Example #7
def main():
    if len(sys.argv) < 2:
        usage_and_exit()
    #print("top_dir: '%s'" % get_top_dir())
    ensure_7z_exists()
    conf = util.load_config()
    assert conf.aws_access is not None, "conf.py is missing"
    s3.set_secrets(conf.aws_access, conf.aws_secret)
    s3.set_bucket("kjkpub")

    ver = sys.argv[1]
    #print("ver: '%s'" % ver)
    upload(ver)
Example #8
    def _load(self):
        if not self.check_sanity():
            return

        if (not os.path.exists(AUTH_DIR)): os.mkdir(AUTH_DIR)

        # load config
        self.config = util.load_config(self.path_conf)
        self.namespace = self.config.get("core", "namespace")
        self.clientid = self.config.get("core", "clientid")

        # load services from config
        self.srvmap = {}
        for tok in self.config.get("backend", "services").split(","):
            srv = services.factory(tok)
            self.srvmap[srv.sid()] = srv

        self.nreplicas = int(self.config.get("backend", "nreplicas"))

        nthreads = self.options.nthreads if self.options is not None else 2
        self.scheduler = Scheduler(self.services,
                                   (nthreads + 1) * len(self.srvmap))

        # load translator pipe
        if self.is_encypted():
            self.translators.append(translators.TrEncrypt(self))

        # TODO. for integrity option
        # if self.is_signed():
        #     self.translators.append(TrSigned(self))

        beg = time.time()
        if (os.path.exists(self.get_path("mapping.pcl"))):
            with open(self.get_path("mapping.pcl")) as f:
                self.mapping = pickle.load(f)
        else:
            mapconfig = []
            for srv in self.services:
                mapconfig.append((srv.sid(), srv.info_storage() / GB))
            hspacesum = sum(map(lambda x: x[1], mapconfig))
            hspace = max(hspacesum + 1, 1024)
            self.mapping = DetMap2(mapconfig,
                                   hspace=hspace,
                                   replica=self.nreplicas)
            self.mapping.pack()
            with open(self.get_path("mapping.pcl"), "w") as f:
                pickle.dump(self.mapping, f)
        end = time.time()
        dbg.time("mapping init %s" % (end - beg))
        dbg.dbg("head: %s", self.get_head_name())
Example #9
    def _load_model_options(self):
        """
        Loads config options for each model.
        """
        options = []
        for model in self._models:
            m = load_config(model)
            if not 'concatenate_lm_decoder' in m:
                m['concatenate_lm_decoder'] = False
            options.append(m)
            # backward compatibility
            fill_options(options[-1])

        self._options = options
Example #10
def email_build_failed(ver):
	s3_url_start = "http://kjkpub.s3.amazonaws.com/sumatrapdf/buildbot/"
	c = load_config()
	if not c.HasNotifierEmail():
		return
	sender, senderpwd = c.GetNotifierEmailAndPwdMustExist()
	subject = "SumatraPDF build %s failed" % str(ver)
	checkin_url = "https://code.google.com/p/sumatrapdf/source/detail?r=%s" % str(ver)
	body = "Checkin: %s\n\n" % checkin_url
	build_log_url = s3_url_start + str(ver) + "/rel_build_log.txt"
	body += "Build log: %s\n\n" % build_log_url
	buildbot_index_url = s3_url_start + "index.html"
	body += "Buildbot: %s\n\n" % buildbot_index_url
	util.sendmail(sender, senderpwd, g_email_to, subject, body)
Example #11
    def _load_model_options(self):
        """
        Loads config options for each model.
        """

        self._options = []
        for model in self._models:
            config = load_config(model)
            # backward compatibility
            fill_options(config)
            config['reload'] = model
            self._options.append(argparse.Namespace(**config))

        _, _, _, self._num_to_target = load_dictionaries(self._options[0])
Example #12
def compile():
	
	owl_parser = Parser()
	page_data = { 
		'ontology': owl_parser.process(),
		'metadata': load_config()['site_info'] }

	#print data_dict
	
	with open('templates/base.html', 'rb') as base_template_file, \
		open('output/index.html', 'wb') as output:
		template = jinja_env.get_template( 'base.html')
		rendered_html = template.render(page_data=page_data)
		output.write(rendered_html)
Example #13
    def __init__(self):
        super(VisualizationWindow, self).__init__()
        pg.setConfigOptions(imageAxisOrder='row-major')
        self.settings = load_config()

        frame = pg.QtGui.QFrame()
        layout = pg.QtGui.QGridLayout()
        frame.setLayout(layout)

        self.camera_window = pg.GraphicsLayoutWidget(self)
        self.feature_window = pg.GraphicsLayoutWidget(self)
        self.detailed_feature_window = pg.GraphicsLayoutWidget()

        self.layer_frame = pg.QtGui.QFrame()
        self.layer_layout = pg.QtGui.QGridLayout()
        self.layer_frame.setLayout(self.layer_layout)

        layout.addWidget(self.camera_window, 0, 0, 1, 2)
        layout.addWidget(self.build_config_frame(), 1, 0, 1, 1)
        layout.addWidget(self.feature_window, 2, 0, 2, 2)
        layout.addWidget(self.layer_frame, 3, 0, 2, 2)

        layout.setRowStretch(0, 25)
        layout.setRowStretch(1, 2)
        layout.setRowStretch(2, 30)
        layout.setRowStretch(3, 1)

        self.setCentralWidget(frame)

        self.setGeometry(0, 200, 1600, 900)

        self.selected_filter = 1

        self.current_layer_dimensions = (512, 7, 7)
        self.ready = True

        self.rows, self.cols = good_shape(self.current_layer_dimensions[0])

        self.last_feature_image = None
        self.video_capture = None
        self.last_frame = None
        self.last_button = None
        self.feature_server_timer = None
        self.rois = []
        self.selector = None

        self.build_views()
        self.start_timers()
        self.process_settings()
Example #14
def hp_search(trial: optuna.Trial):
    if torch.cuda.is_available():
        logger.info("%s", torch.cuda.get_device_name(0))

    global gopt
    opt = gopt
    # set config
    config = load_config(opt)
    config['opt'] = opt
    logger.info("%s", config)

    # set path
    set_path(config)

    # set search spaces
    lr = trial.suggest_float('lr', 1e-5, 1e-3, log=True)
    bsz = trial.suggest_categorical('batch_size', [32, 64, 128])
    seed = trial.suggest_int('seed', 17, 42)
    epochs = trial.suggest_int('epochs', 1, opt.epoch)

    # prepare train, valid dataset
    train_loader, valid_loader = prepare_datasets(config, hp_search_bsz=bsz)

    with temp_seed(seed):
        # prepare model
        model = prepare_model(config)
        # create optimizer, scheduler, summary writer, scaler
        optimizer, scheduler, writer, scaler = prepare_osws(config, model, train_loader, hp_search_lr=lr)
        config['optimizer'] = optimizer
        config['scheduler'] = scheduler
        config['writer'] = writer
        config['scaler'] = scaler

        early_stopping = EarlyStopping(logger, patience=opt.patience, measure='f1', verbose=1)
        best_eval_f1 = -float('inf')
        for epoch in range(epochs):
            eval_loss, eval_f1 = train_epoch(model, config, train_loader, valid_loader, epoch)

            # early stopping
            if early_stopping.validate(eval_f1, measure='f1'): break
            if eval_f1 > best_eval_f1:
                best_eval_f1 = eval_f1
                early_stopping.reset(best_eval_f1)
            early_stopping.status()

            trial.report(eval_f1, epoch)
            if trial.should_prune():
                raise optuna.TrialPruned()
        return eval_f1
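hp_search above is written as an Optuna objective: it reports eval_f1 after every epoch so the pruner can stop weak trials early, and returns the final score. The snippet does not show how it is driven; a minimal driver using the standard Optuna API might look like the sketch below (the trial count and pruner choice are assumptions, not taken from the original project).

# Sketch of a driver for the hp_search objective above; illustrative only.
import optuna

study = optuna.create_study(direction='maximize',
                            pruner=optuna.pruners.MedianPruner())
study.optimize(hp_search, n_trials=20)
print("best f1:", study.best_value)
print("best params:", study.best_trial.params)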
Example #15
def email_build_failed(ver):
    s3_url_start = "http://kjkpub.s3.amazonaws.com/sumatrapdf/buildbot/"
    c = load_config()
    if not c.HasNotifierEmail():
        return
    sender, senderpwd = c.GetNotifierEmailAndPwdMustExist()
    subject = "SumatraPDF build %s failed" % str(ver)
    checkin_url = "https://code.google.com/p/sumatrapdf/source/detail?r=%s" % str(
        ver)
    body = "Checkin: %s\n\n" % checkin_url
    build_log_url = s3_url_start + str(ver) + "/release_build_log.txt"
    body += "Build log: %s\n\n" % build_log_url
    buildbot_index_url = s3_url_start + "index.html"
    body += "Buildbot: %s\n\n" % buildbot_index_url
    util.sendmail(sender, senderpwd, g_email_to, subject, body)
Example #16
def main():
	verify_started_in_right_directory()
	# to avoid problems, we build a separate source tree, just for the buildbot
	src_path = os.path.join("..", "sumatrapdf_buildbot")
	verify_path_exists(src_path)
	conf = load_config()
	s3.set_secrets(conf.aws_access, conf.aws_secret)
	s3.set_bucket("kjkpub")
	os.chdir(src_path)

	#build_version("6698", skip_release=True)
	#build_index_html()
	#build_sizes_json()
	#build_curr(force=True)
	buildbot_loop()
Example #17
def main():
	verify_efi_present()
	verify_started_in_right_directory()
	# to avoid problems, we build a separate source tree, just for the buildbot
	src_path = os.path.join("..", "sumatrapdf_buildbot")
	verify_path_exists(src_path)
	conf = load_config()
	s3.set_secrets(conf.aws_access, conf.aws_secret)
	s3.set_bucket("kjkpub")
	os.chdir(src_path)

	#build_version("6698", skip_release=True)
	#test_build_html_index()
	#build_sizes_json()
	#build_curr(force=True)
	buildbot_loop()
Example #18
def email_tests_failed(ver, err):
    s3_url_start = "http://kjkpub.s3.amazonaws.com/sumatrapdf/buildbot/"
    c = load_config()
    if not c.HasNotifierEmail():
        print("email_tests_failed() not ran because not c.HasNotifierEmail()")
        return
    sender, senderpwd = c.GetNotifierEmailAndPwdMustExist()
    subject = "SumatraPDF tests failed for build %s" % str(ver)
    checkin_url = "https://code.google.com/p/sumatrapdf/source/detail?r=%s" % str(ver)
    body = "Checkin: %s\n\n" % checkin_url
    log_url = s3_url_start + str(ver) + "/tests_error.txt"
    body += "Build log: %s\n\n" % log_url
    buildbot_index_url = s3_url_start + "index.html"
    body += "Buildbot: %s\n\n" % buildbot_index_url
    body += "Error: %s\n\n" % err
    util.sendmail(sender, senderpwd, g_email_to, subject, body)
Example #19
def email_tests_failed(ver, err):
    s3_url_start = "http://kjkpub.s3.amazonaws.com/sumatrapdf/buildbot/"
    c = load_config()
    if not c.HasNotifierEmail():
        print("email_tests_failed() not ran because not c.HasNotifierEmail()")
        return
    sender, senderpwd = c.GetNotifierEmailAndPwdMustExist()
    subject = "SumatraPDF tests failed for build %s" % str(ver)
    checkin_url = "https://code.google.com/p/sumatrapdf/source/detail?r=%s" % str(ver)
    body = "Checkin: %s\n\n" % checkin_url
    log_url = s3_url_start + str(ver) + "/tests_error.txt"
    body += "Build log: %s\n\n" % log_url
    buildbot_index_url = s3_url_start + "index.html"
    body += "Buildbot: %s\n\n" % buildbot_index_url
    body += "Error: %s\n\n" % err
    util.sendmail(sender, senderpwd, g_email_to, subject, body)
Example #20
def train(opt):
    if torch.cuda.is_available():
        logger.info("%s", torch.cuda.get_device_name(0))

    # set etc
    torch.autograd.set_detect_anomaly(True)

    # set config
    config = load_config(opt)
    config['opt'] = opt
    logger.info("%s", config)

    # set path
    set_path(config)

    # prepare train, valid dataset
    train_loader, valid_loader = prepare_datasets(config)

    with temp_seed(opt.seed):
        # prepare model
        model = prepare_model(config)

        # create optimizer, scheduler, summary writer, scaler
        optimizer, scheduler, writer, scaler = prepare_osws(
            config, model, train_loader)
        config['optimizer'] = optimizer
        config['scheduler'] = scheduler
        config['writer'] = writer
        config['scaler'] = scaler

        # training
        early_stopping = EarlyStopping(logger,
                                       patience=opt.patience,
                                       measure='f1',
                                       verbose=1)
        local_worse_epoch = 0
        best_eval_f1 = -float('inf')
        for epoch_i in range(opt.epoch):
            epoch_st_time = time.time()
            eval_loss, eval_f1, best_eval_f1 = train_epoch(
                model, config, train_loader, valid_loader, epoch_i,
                best_eval_f1)
            # early stopping
            if early_stopping.validate(eval_f1, measure='f1'): break
            if eval_f1 == best_eval_f1:
                early_stopping.reset(best_eval_f1)
            early_stopping.status()
Example #21
    def _load(self):
        if not self.check_sanity():
            return

        if(not os.path.exists(AUTH_DIR)): os.mkdir(AUTH_DIR)

        # load config
        self.config    = util.load_config(self.path_conf)
        self.namespace = self.config.get("core", "namespace")
        self.clientid  = self.config.get("core", "clientid")

        # load services from config
        self.srvmap = {}
        for tok in self.config.get("backend", "services").split(","):
            srv = services.factory(tok)
            self.srvmap[srv.sid()] = srv

        self.nreplicas = int(self.config.get("backend", "nreplicas"))
            
        nthreads = self.options.nthreads if self.options is not None else 2
        self.scheduler = Scheduler(self.services, (nthreads+1)*len(self.srvmap))

        # load translator pipe
        if self.is_encypted():
            self.translators.append(translators.TrEncrypt(self))

        # TODO. for integrity option
        # if self.is_signed():
        #     self.translators.append(TrSigned(self))

        beg = time.time()
        if(os.path.exists(self.get_path("mapping.pcl"))):
            with open(self.get_path("mapping.pcl")) as f:
                self.mapping = pickle.load(f)
        else:
            mapconfig = []
            for srv in self.services:
                mapconfig.append((srv.sid(), srv.info_storage()/GB))
            hspacesum = sum(map(lambda x:x[1], mapconfig))
            hspace = max(hspacesum+1, 1024)
            self.mapping = DetMap2(mapconfig, hspace=hspace, replica=self.nreplicas)
            self.mapping.pack()
            with open(self.get_path("mapping.pcl"), "w") as f:
                pickle.dump(self.mapping, f)
        end = time.time()
        dbg.time("mapping init %s" % (end-beg))
        dbg.dbg("head: %s", self.get_head_name())
Example #22
def run(dest, results_path):
    main_query = util.load_query()
    results = pd\
        .read_csv(os.path.join(results_path, "fall_response_corrs.csv"))\
        .set_index(['block', 'X', 'Y'])

    format_pearson = util.load_config()["latex"]["pearson"]

    fh = open(dest, "w")

    for (block, x, y), corrs in results.iterrows():
        x = "".join([i.capitalize() for i in x.split("_")])
        cmdname = "FallCorr{}v{}{}".format(x, y, block)
        cmd = format_pearson.format(**corrs)
        fh.write(util.newcommand(cmdname, cmd))

    fh.close()
Example #23
async def main():
    config = load_config()
    init_logger("pc.log")
    #setup networking
    datalogging = DataBackup(config=config)
    net = Net(
        target=config["net"]["target"],
        port=config["net"]["port"],
        node_type=True,
        data_backup=datalogging
    )

    async_methods = [
        net.send(),
        datalogging.BackupLoop(),
    ]
    return asyncio.gather(*async_methods)
Example #24
def uploadStringsIfChanged(skip_svn_check=False):
    # needs to have upload secret to protect apptranslator.org server from
    # abuse
    config = util.load_config()
    uploadsecret = config.trans_ul_secret
    if None is uploadsecret:
        print("Skipping string upload because don't have upload secret")
        return

    if not skip_svn_check:
        # Note: this check might be confusing due to how svn works.
        # Unfortunately, if you have local latest revision 5 and do a checkin to create
        # revision 6, svn info says that locally you're still on revision 5, even though
        # the code is actually at revision 6.
        # You need to do "svn update" to update local version number
        # Unfortunately I can't do it automatically here since it would be dangerous
        # (i.e. it would update code locally).
        # svn update is called in build.py, so it's not a problem if it's run
        # from  ./scripts/build-release.bat or ./scripts/build-pre-release.bat
        try:
            (local_ver, latest_ver) = util.get_svn_versions()
        except:
            print(
                "Skipping string upload because SVN isn't available to check for up-to-date-ness"
            )
            return
        if int(latest_ver) > int(local_ver):
            print(
                "Skipping string upload because your local version (%s) is older than latest in svn (%s)"
                % (local_ver, latest_ver))
            return

    strings = extract_strings_from_c_files()
    strings.sort()
    s = "AppTranslator strings\n" + string.join(strings, "\n")
    s = s.encode("utf8")

    if lastUploaded() == s:
        print(
            "Skipping upload because strings haven't changed since last upload"
        )
    else:
        uploadStringsToServer(s, uploadsecret)
        saveLastUploaded(s)
        print("Don't forget to checkin strings/last_uploaded.txt")
Example #25
def main(models,
         source_file,
         nbest_file,
         saveto,
         b=80,
         normalize=False,
         verbose=False,
         alignweights=False):

    # load model model_options
    options = []
    for model in models:
        options.append(load_config(model))

        fill_options(options[-1])

    rescore_model(source_file, nbest_file, saveto, models, options, b,
                  normalize, verbose, alignweights)
Example #26
def main():
    cert_path()  # early check and ensures value is memoized
    verify_efi_present()
    verify_started_in_right_directory()
    # to avoid problems, we build a separate source tree, just for the buildbot
    src_path = os.path.join("..", "sumatrapdf_buildbot")
    verify_path_exists(src_path)
    conf = load_config()
    s3.set_secrets(conf.aws_access, conf.aws_secret)
    s3.set_bucket("kjkpub")
    os.chdir(src_path)

    # test_email_tests_failed()
    #build_version("8190", skip_release=True)
    # test_build_html_index()
    # build_sizes_json()
    # build_curr(force=True)
    buildbot_loop()
Example #27
def main(models,
         source_file,
         target_file,
         saveto,
         b=80,
         normalization_alpha=0.0,
         verbose=False,
         alignweights=False,
         extra_sources=[],
         per_word=False):
    # load model model_options
    options = []
    for model in models:
        options.append(load_config(model))

        fill_options(options[-1])

    # multi-source or single source functions
    if len(extra_sources) == 0:
        savetos = [saveto] + [file(saveto.name, 'w') for _ in extra_sources]
        multi_rescore_model(source_file,
                            target_file,
                            savetos,
                            models,
                            options,
                            b,
                            normalization_alpha,
                            verbose,
                            alignweights,
                            per_word=per_word)
    else:
        savetos = [saveto] + [file(saveto.name, 'w') for _ in extra_sources]
        #source_files = source_files + extra_sources
        multi_rescore_model(source_file,
                            target_file,
                            savetos,
                            models,
                            options,
                            b,
                            normalization_alpha,
                            verbose,
                            alignweights,
                            per_word=per_word,
                            extra_sources=extra_sources)
Example #28
def create_original_dictionary():
    """
    Create original dictionary
    """
    res = []

    df_new_word = pd.read_csv(FILE_NEW_WORD)
    df_new_word = df_new_word.assign(
        cost=lambda df: df.cost.fillna(1).astype(int),
        pos1=lambda df: df.pos1.fillna('名詞'),
        pos2=lambda df: df.pos2.fillna('固有名詞'),
        pos3=lambda df: df.pos3.fillna('一般'),
    )
    for _, row in df_new_word.iterrows():
        morph = create_morph(**row.to_dict())
        if morph:
            res.append(morph + '\n')

    with open(FILE_OUTPUT, 'w') as f:
        f.writelines(res)

    compile_dictionary()
    logger.info('New words added to dictionary')

    file_config = load_config('file')
    __NODE = {
        'keys':         ('features', 'cost'),
        'node-format':  ('%H',       '%pw'),
        'unk-format':   ('%H',       '%pw'),
    }
    parser = WordParser(**file_config['mecab'], node=__NODE)

    for file_path in (FILE_CLOSE_WORD, FILE_CLOSE_WORD_ORIGINAL):
        df_close_word = pd.read_csv(file_path)
        for row in df_close_word.itertuples():
            morph = replace_morph(parser, row.word, row.replace_word)
            if morph:
                res.append(morph + '\n')

    with open(FILE_OUTPUT, 'w') as f:
        f.writelines(res)
    logger.info(f'Created original dictionary in {FILE_OUTPUT}')
    compile_dictionary()
    logger.info(f'Original dictionary compiled!')
Example #29
def uploadStringsIfChanged(skip_svn_check=False):
    # needs to have upload secret to protect apptranslator.org server from
    # abuse
    config = util.load_config()
    uploadsecret = config.trans_ul_secret
    if None is uploadsecret:
        print("Skipping string upload because don't have upload secret")
        return

    if not skip_svn_check:
        # Note: this check might be confusing due to how svn works.
        # Unfortunately, if you have local latest revision 5 and do a checkin to create
        # revision 6, svn info says that locally you're still on revision 5, even though
        # the code is actually at revision 6.
        # You need to do "svn update" to update local version number
        # Unfortunately I can't do it automatically here since it would be dangerous
        # (i.e. it would update code locally).
        # svn update is called in build.py, so it's not a problem if it's run
        # from  ./scripts/build-release.bat or ./scripts/build-pre-release.bat
        try:
            (local_ver, latest_ver) = util.get_svn_versions()
        except:
            print(
                "Skipping string upload because SVN isn't available to check for up-to-date-ness")
            return
        if int(latest_ver) > int(local_ver):
            print(
                "Skipping string upload because your local version (%s) is older than latest in svn (%s)" %
                (local_ver, latest_ver))
            return

    strings = extract_strings_from_c_files()
    strings.sort()
    s = "AppTranslator strings\n" + string.join(strings, "\n")
    s = s.encode("utf8")

    if lastUploaded() == s:
        print(
            "Skipping upload because strings haven't changed since last upload")
    else:
        uploadStringsToServer(s, uploadsecret)
        saveLastUploaded(s)
        print("Don't forget to checkin strings/last_uploaded.txt")
Example #30
def main(settings):
    """
    Translates a source language file (or STDIN) into a target language file
    (or STDOUT).
    """
    # Start logging.
    level = logging.DEBUG if settings.verbose else logging.INFO
    logging.basicConfig(level=level, format='%(levelname)s: %(message)s')

    # Create the TensorFlow session.
    tf_config = tf.ConfigProto()
    tf_config.allow_soft_placement = True
    session = tf.Session(config=tf_config)

    # Load config file for each model.
    configs = []
    for model in settings.models:
        config = util.load_config(model)
        compat.fill_options(config)
        config['reload'] = model
        configs.append(argparse.Namespace(**config))

    # Create the model graphs and restore their variables.
    logging.debug("Loading models\n")
    models = []
    for i, config in enumerate(configs):
        with tf.variable_scope("model%d" % i) as scope:
            model = rnn_model.RNNModel(config)
            saver = model_loader.init_or_restore_variables(
                config, session, ensemble_scope=scope)
            models.append(model)

    # Translate the source file.
    inference.translate_file(input_file=settings.input,
                             output_file=settings.output,
                             session=session,
                             models=models,
                             configs=configs,
                             beam_size=settings.beam_size,
                             nbest=settings.n_best,
                             minibatch_size=settings.minibatch_size,
                             maxibatch_size=settings.maxibatch_size,
                             normalization_alpha=settings.normalization_alpha)
Example #31
def main():
    verify_can_send_email()
    cert_path()  # early check and ensures value is memoized
    verify_started_in_right_directory()
    # to avoid problems, we build a separate source tree, just for the buildbot
    src_path = os.path.join("..", "sumatrapdf_buildbot")
    verify_path_exists(src_path)
    conf = load_config()
    s3.set_secrets(conf.aws_access, conf.aws_secret)
    s3.set_bucket("kjkpub")
    os.chdir(src_path)

    # test_email_tests_failed()
    #build_version("8190", skip_release=True)
    # test_build_html_index()
    # build_sizes_json()
    # build_curr(force=True)

    buildbot_loop()
Example #32
def __get_token() -> str:
    get_logger().info("__get_token is called.")

    token = load_config()
    encoded_key = b64encode(
        (token["api_key"] + ":" +
         token["api_key_secret"]).encode("ascii")).decode("ascii")

    url = "https://api.twitter.com/oauth2/token"
    params = {"grant_type": "client_credentials"}
    headers = {
        "Authorization": "Basic " + encoded_key,
        "Content-Type": "application/x-www-form-urlencoded;charset=UTF-8"
    }

    r = requests.post(url, params=params, headers=headers)
    if r.status_code != 200:
        raise TwAPIException(r.text)

    return r.json()["access_token"]
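The helper above implements the OAuth2 client-credentials flow: it base64-encodes api_key:api_key_secret, posts to the token endpoint, and returns a bearer token. A hypothetical caller might use it as in the sketch below; the endpoint and query are illustrative only, not from the original code.

# Illustrative use of the bearer token returned by __get_token() above.
token = __get_token()
headers = {"Authorization": "Bearer " + token}
resp = requests.get("https://api.twitter.com/2/tweets/search/recent",
                    params={"query": "sumatrapdf"}, headers=headers)
print(resp.status_code)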
Example #33
def run_policy(policy):

    modelid = 0
    session = int(time.time())
    config = util.load_config("config.json")
    trainer = util.make_trainer(config, session)
    model = util.make_model(config)
    util.full_save(model, modelid, session)

    # Bookkeeping
    util.copy_config(config, session, additional_info={"policy": policy})

    # Train loop
    trace = [([False] * len(policy[0]), 0, 0)]
    for step in policy:
        trainer.log_line("Trace: " + "->".join([str(s) for s in trace]))
        loss = trainer.train(model, modelid, modelid + 1, step)
        trace.append((step, modelid + 1, modelid))
        modelid += 1
    trainer.log_line("Trace: " + "->".join([str(s) for s in trace]))
Example #34
def evaluate():
  if len(sys.argv) < 3:
    print(
        "Usage: python3 evaluate.py [config suffix] [model name]"
    )
    exit(-1)
  if len(sys.argv) == 3:
    print(
        " Note: Process a single image at a time may be inefficient - try multiple inputs)"
    )
  print("(TODO: batch processing when images have the same resolution)")
  print()
  print("Initializing...")
  config_name = sys.argv[1]
  import shutil
  shutil.copy('models/%s/%s/scripts/config_%s.py' %
              (config_name, sys.argv[2], config_name), 'config_tmp.py')
  cfg = load_config('tmp')
  cfg.name = sys.argv[1] + '/' + sys.argv[2]
  net = Discriminator_eval(cfg)
  net.eval()
Example #35
def main():
    parser = argparse.ArgumentParser()

    parser.add_argument('--config',
                        type=str,
                        default='configs/config-glove.json')
    parser.add_argument('--data_dir', type=str, default='data/conll2003')
    parser.add_argument('--embedding_path',
                        type=str,
                        default='embeddings/glove.6B.300d.txt')
    parser.add_argument("--seed", default=5, type=int)
    # for BERT
    parser.add_argument(
        "--bert_model_name_or_path",
        type=str,
        default='bert-base-uncased',
        help="Path to pre-trained model or shortcut name(ex, bert-base-uncased)"
    )
    parser.add_argument(
        "--bert_do_lower_case",
        action="store_true",
        help="Set this flag if you are using an uncased model.")
    opt = parser.parse_args()

    # set seed
    random.seed(opt.seed)

    # set config
    config = load_config(opt)
    config['opt'] = opt
    logger.info("%s", config)

    if config['emb_class'] == 'glove':
        preprocess_glove_or_elmo(config)
    if config['emb_class'] in [
            'bert', 'distilbert', 'albert', 'roberta', 'bart', 'electra'
    ]:
        preprocess_bert(config)
    if config['emb_class'] == 'elmo':
        preprocess_glove_or_elmo(config)
Example #36
def load_arguments(config_file=None):
    """
        Load CLI input, load config.toml , overwrite config.toml by CLI Input
    """
    if config_file is None:
        cur_path = os.path.dirname(os.path.realpath(__file__))
        config_file = os.path.join(cur_path, "config.toml")
    print(config_file)

    p = argparse.ArgumentParser()
    p.add_argument("--config_file", default=config_file, help="Params File")
    p.add_argument("--config_mode", default="test", help="test/ prod /uat")
    p.add_argument("--log_file", help="File to save the logging")

    p.add_argument("--do", default="test", help="what to do test or search")
    p.add_argument("--ntrials",
                   default=100,
                   help='number of trials during the hyperparameters tuning')
    p.add_argument(
        "--modelname",
        default="model_dl.1_lstm.py",
        help=
        "name of the model to be tuned this name will be used to save the model"
    )
    p.add_argument("--data_path",
                   default="dataset/GOOG-year_small.csv",
                   help="path of the training file")
    p.add_argument('--optim_engine',
                   default='optuna',
                   help='Optimization engine')
    p.add_argument('--optim_method',
                   default='normal/prune',
                   help='Optimization method')
    p.add_argument('--save_folder',
                   default='model_save',
                   help='folder that will contain saved version of best model')

    args = p.parse_args()
    args = load_config(args, args.config_file, args.config_mode, verbose=0)
    return args
Example #37
def main(new_ver):
    url_update = "https://kjkpub.s3.amazonaws.com/sumatrapdf/sumpdf-update.txt"
    url_latest = "https://kjkpub.s3.amazonaws.com/sumatrapdf/sumpdf-latest.txt"

    conf = load_config()
    aws_access, aws_secret = conf.GetAwsCredsMustExist()
    s3.set_secrets(aws_access, aws_secret)
    s3.set_bucket("kjkpub")

    v1 = get_latest_version(url_latest)
    (v2, ver_4) = get_update_versions(url_update)
    validate_ver(ver_4)
    assert not v2 or v1 == v2, "sumpdf-update.txt and sumpdf-latest.txt don't agree on Stable version, run build.py -release first"

    if not new_ver:
        print("Current version: %s. To update run:\npython scripts\update_auto_update_ver.py <new_version>" % v1)
        return

    verify_version_not_lower(new_ver, v1, v2)
    sys.stdout.write("Current version: %s\nGoing to update auto-update version to %s. Are you sure? [y/N] " % (v1, new_ver))
    sys.stdout.flush()
    ch = getch()
    print()
    if ch not in ['y', 'Y']:
        print("Didn't update because you didn't press 'y'")
        sys.exit(1)

    # remove the Stable version from sumpdf-update.txt
    s = "[SumatraPDF]\nLatest %s\n" % new_ver
    s3.upload_data_public(s, "sumatrapdf/sumpdf-update.txt")
    # keep updating the legacy file for now
    s = "%s\n" % new_ver
    s3.upload_data_public(s, "sumatrapdf/sumpdf-latest.txt")
    v1 = get_latest_version(url_latest)
    (v2, v3) = get_update_versions(url_update)
    if v1 != new_ver or v2 != None or v3 != new_ver:
        print("Upload failed because v1 or v3 != ver ('%s' or '%s' != '%s'" % (v1, v3, new_ver))
        sys.exit(1)
    print("Successfully update auto-update version to '%s'" % new_ver)
Example #38
def test(t_agent, trial_dir, visual_flag, token):
    assert trial_dir is not None and os.path.exists(trial_dir)

    # prepare trial environment
    pid = os.getpid()
    logger, _ = prepare_for_logging(str(pid), create_folder=False)

    # load config
    config_file = os.path.join(trial_dir, "config.yaml")
    if not os.path.exists(config_file):
        convert_legacy_config(trial_dir, t_agent)
    config = util.load_config(config_file)

    if "max_obstacles" not in config:
        config["max_obstacles"] = 3
    env = NIPS(visualize=visual_flag,
               max_obstacles=config["max_obstacles"],
               token=token)
    util.print_settings(logger, config, env)

    # instantiate an agent
    config["logger"] = logger
    config["log_dir"] = trial_dir
    config["model_dir"] = trial_dir
    if t_agent == "DDPG":
        from ddpg import DDPG
        agent = DDPG(env, config)
    elif t_agent == "TRPO":
        from trpo import TRPO
        agent = TRPO(env, config)
    else:
        raise ValueError("Unsupported agent type: {}".format(t_agent))
    agent.set_state(config)

    # test
    util.print_sec_header(logger, "Testing")
    rewards = agent.test(logging=env.remote_env)
    logger.info("avg_reward={}".format(np.mean(rewards)))
    env.close()
Example #39
def main():
    config = util.load_config()
    driver = get_chrome_driver(config)
    data = {}
    # Open reviews and pick first review to start crawling
    driver.get("https://steamcommunity.com/?subsection=reviews")
    print(driver.title)

    # Locate submit button, click to get all results
    driver.implicitly_wait(15)
    reviewCards = driver.find_elements_by_class_name("apphub_Card")
    reviewCards[1].click()  # Mod as needed, some profiles are private
    element = driver.find_element_by_xpath(
        "//a[contains(@href, 'steamcommunity.com/id')]")
    profile_link = element.text.split('/')[4]
    steamID64 = get_steam_id(element, driver)
    print("Writing to csv...")
    data[steamID64] = crawl_friends(profile_link, driver)
    write_to_json(data)
    time.sleep(10)
    # Close everything
    driver.quit()
    return 0
Example #40
def uploadStringsIfChanged():
    # needs to have upload secret to protect apptranslator.org server from abuse
    config = util.load_config()
    uploadsecret = config.trans_ul_secret
    if None is uploadsecret:
        print("Skipping string upload because don't have upload secret")
        return

    # TODO: we used to have a check if svn is up-to-date
    # should we restore it for git?

    strings = extract_strings_from_c_files()
    strings.sort()
    s = "AppTranslator strings\n" + string.join(strings, "\n")
    s = s.encode("utf8")

    if lastUploaded() == s:
        print(
            "Skipping upload because strings haven't changed since last upload")
    else:
        uploadStringsToServer(s, uploadsecret)
        saveLastUploaded(s)
        print("Don't forget to checkin strings/last_uploaded.txt")
Example #41
def main():
    url_update = "http://kjkpub.s3.amazonaws.com/sumatrapdf/sumpdf-update.txt"
    url_latest = "http://kjkpub.s3.amazonaws.com/sumatrapdf/sumpdf-latest.txt"

    conf = load_config()
    assert conf.aws_access != "" and conf.aws_secret != ""
    s3.set_secrets(conf.aws_access, conf.aws_secret)
    s3.set_bucket("kjkpub")

    v1 = get_latest_version(url_latest)
    (v2, ver) = get_update_versions(url_update)
    validate_ver(ver)
    assert not v2 or v1 == v2, "sumpdf-update.txt and sumpdf-latest.txt don't agree on Stable version, run build.py -release first"
    verify_version_not_lower(ver, v1, v2)
    sys.stdout.write(
        "Going to update auto-update version to %s. Are you sure? [y/N] " %
        ver)
    sys.stdout.flush()
    ch = getch()
    print()
    if ch not in ['y', 'Y']:
        print("Didn't update because you didn't press 'y'")
        sys.exit(1)

    # remove the Stable version from sumpdf-update.txt
    s = "[SumatraPDF]\nLatest %s\n" % ver
    s3.upload_data_public(s, "sumatrapdf/sumpdf-update.txt")
    # keep updating the legacy file for now
    s = "%s\n" % ver
    s3.upload_data_public(s, "sumatrapdf/sumpdf-latest.txt")
    v1 = get_latest_version(url_latest)
    (v2, v3) = get_update_versions(url_update)
    if v1 != ver or v2 != None or v3 != ver:
        print("Upload failed because v1 or v3 != ver ('%s' or '%s' != '%s'" %
              (v1, v3, ver))
        sys.exit(1)
    print("Successfully update auto-update version to '%s'" % ver)
Example #42
def main():
    from_to = []
    config = util.load_config()
    with open(config["steam_ids"]["results"], 'r') as f:
        results_json = json.load(f)
        keys = [n for n in results_json.keys()]
        steamids = results_json[keys[0]] 
    print("Closing config...")
    api_key = config['steam_api_key']['key']
    steam_data = []
    print("Starting to iterate...")
    for i in range(10):
        steamid = steamids[i]
        # Web API endpoints from which data can be fetched
        api_getownedgames = 'http://api.steampowered.com/IPlayerService/GetOwnedGames/v001/?key={}&steamid={}&format=json&include_appinfo=1&include_played_free_games=1'.format(api_key, steamid)
        # Fetch this steamID's owned games from the Steam Web API
        print(f'Making request with: {api_getownedgames}')
        try:
            r = requests.get(api_getownedgames)
            data = r.json()
            game_nodes = [s for s in data['response']['games']]
            gameIDs = [n['name'] for n in game_nodes]
            print(steamid)
            from_to = [n for n in gameIDs]
            for node in from_to:
                steam_data.append([int(steamid), node])
            '''
            Note: not sure if this works in Gephi, as it saves game names (str). If 'name' from row 42 is changed to
            'appid' it gives an int, which works with Gephi
            '''
        except KeyError:
            print("Failed request, KeyError")
            continue

    write_to_node_csv(steam_data)
    return 0
Example #43
def main():
    config = util.load_config()
    sc, ssc, sqlc = util.get_spark_streaming_context()

    # receive lines of data
    lines = ssc.socketTextStream('localhost', config['port'])

    # flatmap lines to list of dict
    items = lines.flatMap(util.bin2json)

    # flatmap lines to fake new class (NG, T, F)
    def analyzer_check(item):
        result = analyzer.check(item['paragraph'])
        item['not_given'] = result == analyzer.NOT_GIVEN
        item['truth'] = result == analyzer.TRUE
        return item
    checked_items = items.map(analyzer_check)
    # checked_items.pprint()

    # filter item with not given false, which is relevant
    # or in our knowledge domain
    def is_relevant(item):
        if not item['not_given']:
            del item['not_given']
            return item
    relevant_items = checked_items.filter(is_relevant)
    relevant_items.pprint()

    # save mined data
    def save_streaming_data(time, rdd):
        if not rdd.isEmpty():
            saver.save(sqlc, config, 'news', rdd)
    relevant_items.foreachRDD(save_streaming_data)

    ssc.start()
    ssc.awaitTermination()
Example #44
def uploadStringsIfChanged():
    # needs to have upload secret to protect apptranslator.org server from abuse
    config = util.load_config()
    uploadsecret = config.trans_ul_secret
    if None is uploadsecret:
        print("Skipping string upload because don't have upload secret")
        return

    # TODO: we used to have a check if svn is up-to-date
    # should we restore it for git?

    strings = extract_strings_from_c_files()
    strings.sort()
    s = "AppTranslator strings\n" + string.join(strings, "\n")
    s = s.encode("utf8")

    if lastUploaded() == s:
        print(
            "Skipping upload because strings haven't changed since last upload"
        )
    else:
        uploadStringsToServer(s, uploadsecret)
        saveLastUploaded(s)
        print("Don't forget to checkin strings/last_uploaded.txt")
Example #45
    def __new__(class_, *args, **kwargs):
        if minicluster.default_cluster is not None:
            return minicluster.default_cluster.peloton_client()

        global _client
        if class_._client:
            return class_._client

        config = load_config("config.yaml")["client"]
        cluster = os.getenv("CLUSTER")
        use_apiserver = os.getenv("USE_APISERVER") == 'True'
        if cluster is None or cluster == "local":
            # TODO: remove url overrides once T839783 is resolved
            _client = PelotonClient(
                name=config["name"],
                enable_apiserver=use_apiserver,
                api_url=config["apiserver_url"],
                jm_url=config["jobmgr_url"],
                rm_url=config["resmgr_url"],
                hm_url=config["hostmgr_url"],
            )
            return _client

        if os.getenv("ELECTION_ZK_SERVERS", ""):
            zk_servers = os.getenv("ELECTION_ZK_SERVERS").split(":")[0]
        elif cluster in config["cluster_zk_servers"]:
            zk_servers = config["cluster_zk_servers"][cluster]
        else:
            raise Exception("Unsupported cluster %s" % cluster)

        _client = PelotonClient(
            name=config["name"],
            enable_apiserver=use_apiserver,
            zk_servers=zk_servers,
        )
        return _client
Example #46
    def __new__(class_, *args, **kwargs):
        global _client
        if not class_._client:
            config = load_config("config.yaml")["client"]
            cluster = os.getenv("CLUSTER")
            if cluster is not None and cluster != "local":
                cluster = os.getenv("CLUSTER")
                if os.getenv("ELECTION_ZK_SERVERS", ""):
                    zk_servers = os.getenv("ELECTION_ZK_SERVERS").split(":")[0]
                elif cluster in config["cluster_zk_servers"]:
                    zk_servers = config["cluster_zk_servers"][cluster]
                else:
                    raise Exception("Unsupported cluster %s" % cluster)
                _client = PelotonClient(name=config["name"],
                                        zk_servers=zk_servers)
            else:
                # TODO: remove url overrides once T839783 is resolved
                _client = PelotonClient(
                    name=config["name"],
                    jm_url=config["jobmgr_url"],
                    rm_url=config["resmgr_url"],
                    hm_url=config["hostmgr_url"],
                )
        return _client
Example #47
import json
import requests
import sys
import util
import os


def list_interface_by_host( hostip, userid, passwd):
	print hostip
	resp = requests.post( util.get_nxapi_endpoint( hostip), data=json.dumps( util.get_payload( "show interface brief")), headers=util.myheaders,auth=(userid,passwd)).json()
	outputs = resp['ins_api']['outputs']
	if not 'Success' in outputs['output']['msg']:
		return
	try:
		for row in outputs['output']['body']['TABLE_interface']['ROW_interface']:
			if row['state'] == 'up':
				print " - ", row['interface'], row['vlan'] if row.has_key('vlan') else '--', row['portmode'] if row.has_key('portmode') else ''

	except Exception as e:
		print e


if __name__ == "__main__":
	hosts = util.load_config( sys.argv[1])
	allhosts = hosts['spine'];
	allhosts.extend( hosts['leaf'])
	allhosts.extend( hosts['router'])
	for host in allhosts:
		list_interface_by_host( host, os.environ['NEXUS_USER'], os.environ['NEXUS_PASSWD'])
Example #48
def analyze_feature_set(args, connection, feature_names):
    """Analyzes and plots various standard metrics for each feature."""

    for name in feature_names:
        values = util.get_feature_values(connection.cursor(), name)

        stats.analyze_feature(connection, name, values)

        if args.visualize:
            visualize.plot_feature(connection, name, values)

if __name__ == '__main__':
    from util import load_config, connect_db
    CONFIG = load_config()

    connection = connect_db(CONFIG)

    model_feature_names = map(lambda f: f.__name__, MODEL_FEATURES)
    cluster_feature_names = map(lambda f: f.__name__, CLUSTER_FEATURES)

    algorithm_names = map(lambda a: a.__name__, clustering.ENABLED_ALGORITHMS)

    timespan = lambda s: tuple(map(int, s.split(',', 1)))

    import argparse
    parser = argparse.ArgumentParser(description="Parses sentences.")
    parser.add_argument('-b', '--batch-size', help="batch size", type=int, default=500)
    parser.add_argument('-t', '--timespan', help="time span to use: from,to in seconds since unix epoch", default=(None, None), type=timespan)
    parser.add_argument('--reset', help="clear features before rebuilding", action='store_true', default=False)
Example #49
            elif item.get('debug', False):
                try:
                    if 0 <= int(item['type']) <= 999:
                        continue
                except ValueError:
                    pass
                if item['type'] in ['CONNECTED', 'PING', 'JOIN', 'PART', 'QUIT', 'MODE', 'KICK', 'BAN']:
                    continue
                channel = self.slack_client.server.channels.find('slairck-debug')
                if channel is not None:
                    channel.send_message(unicode(item))


if __name__ == "__main__":
    from util import main_loop

    config = load_config('slack')
    debug = config["DEBUG"]

    bot = SlackBot(config['slack']['token'], config)
    site_plugins = []
    files_currently_downloading = []
    job_hash = {}

    if 'DAEMON' in config:
        if config["DAEMON"]:
            import daemon
            with daemon.DaemonContext():
                main_loop(bot, config)
    main_loop(bot, config)
Example #50
coherence = Coherence(config)

controlpoint = ControlPoint(coherence,auto_client=[])

BOOKMARKPATH = os.path.expanduser('~/.grace-bookmarks')

devices = []
unknown_devices = []

CONFPATH = os.path.join(os.path.expanduser('~'), '.graceradiorc')
DEFAULT_CONFIG = {
# Number of lines to output from the buffer every time enter is pressed
    "buffer_rate": 20,
}

CONFIG = load_config(DEFAULT_CONFIG, CONFPATH)

def add_device(device=None, *args, **kwargs):
    control = None
    for service in device.services:
        _,_,_,service_class,version = service.service_type.split(':')
        if service_class == 'RecivaRadio':
            control = service
    d = {
          'name': device.get_friendly_name(),
          'device':device,
          'control':control,
        }
    if control:
        devices.append(d)
    else:
Example #51
            bot.init()
        while True:
            for bot in bots:
                # print 'processing', bot
                bot.process()
                relay_ins = bot.collect_relay()
                for xbot in bots:
                    if type(bot) == type(xbot):
                        continue
                    xbot.relay(bot, relay_ins)

            time.sleep(0.2)
    except KeyboardInterrupt:
        sys.exit(0)
    except:
        logging.exception("OOPS")


if __name__ == "__main__":
    config = load_config("config")
    slackbot = SlackBot(config["slack"]["token"], config)
    ircbot = IrcBot(config["irc"]["host"], int(config["irc"].get("port", "6667")), config)

    if "DAEMON" in config:
        if config["DAEMON"]:
            import daemon

            with daemon.DaemonContext():
                main_loop((slackbot, ircbot), config)
    main_loop((slackbot, ircbot), config)
Example #52
    def _load_service_config(self):
        return dict_map_string(
            load_config(SERVICE_CONFIG_FILE),
            self._substitute_constants)
Example #53
def verify_can_send_email():
    c = load_config()
    if not c.HasNotifierEmail():
        print("can't run. scripts/config.py missing notifier_email and/or notifier_email_pwd")
        sys.exit(1)
Example #54
	try:
		ssh_client.connect( hostip, username=userid, password=passwd)
		with closing( scpclient.Write( ssh_client.get_transport(), '.')) as scp:
			scp.send_file( 'update_cdp.py', True)
		retval = True
	except Exception:
		print 'failed to scp update_cdp.py'
	return retval


def update_cdp(hostip, userid, passwd):
	retval = True
	target_cmd = "python bootflash:update_cdp.py"
	target_cmd = util.remove_last_semicolon(target_cmd)
	resp = requests.post( util.get_nxapi_endpoint( hostip), data=json.dumps( util.get_conf_payload( target_cmd)), headers=util.myheaders,auth=(userid,passwd)).json()
	outputs = resp['ins_api']['outputs']['output']
	#print outputs
	if not 'Success' in outputs['msg']:
		retval = False
	print 'update_cdp on %s is %s' %(hostip, retval)
	return retval


if __name__ == '__main__':
	roles  = util.load_config( sys.argv[1]) #hosts.yaml
	for role in roles.keys():
		for host in roles[role]:
			scp_update_cdp_code( host, os.environ['NEXUS_USER'], os.environ['NEXUS_PASSWD'])
			update_cdp( host, os.environ['NEXUS_USER'], os.environ['NEXUS_PASSWD'])

Example #55
import os

from docopt import docopt
from pygit2 import Repository
import gitsound

if __name__ == '__main__':

    # Initialize docopt and grab args
    args = docopt(__doc__)

    # Simplify references to args
    cmd = args['<command>']
    arg = args['<argument>']

    config = util.load_config()

    user = gitsound.SpotifyUser(
        config["uid"], config["client_id"], config["client_secret"],
        config["redirect_uri"])

    uid = config["current_playlist"]["uid"]
    pid = config["current_playlist"]["pid"]
    pname = config["current_playlist"]["name"]

    # Determine how to handle args
    if (cmd == 'show'):
        if (arg == 'local'):
            playlists = []
            git_dir = ".activePlaylists/" + user.username + "/"
            pids = [pid for pid in os.listdir(git_dir)]
Example #56
def build(upload, upload_tmp, testing, build_test_installer, build_rel_installer, build_prerelease, skip_transl_update, svn_revision, target_platform):

    verify_started_in_right_directory()
    try_find_config_files()
    if build_prerelease:
        if svn_revision is None:
            run_cmd_throw("svn", "update")
            (out, err) = run_cmd_throw("svn", "info")
            ver = str(parse_svninfo_out(out))
        else:
            # allow passing in an SVN revision, in case SVN itself isn't
            # available
            ver = svn_revision
    else:
        ver = extract_sumatra_version(os.path.join("src", "Version.h"))
        if upload:
            verify_correct_branch(ver)
            verify_not_tagged_yet(ver)

    log("Version: '%s'" % ver)

    # don't update translations for release versions to prevent Trunk changes
    # from messing up the compilation of a point release on a branch
    if build_prerelease and not skip_transl_update:
        trans_upload.uploadStringsIfChanged()
        changed = trans_download.downloadAndUpdateTranslationsIfChanged()
        # Note: this is not a perfect check since re-running the script will
        # proceed
        if changed:
            print(
                "\nNew translations have been downloaded from apptranslator.og")
            print(
                "Please verify and checkin src/Translations_txt.cpp and strings/translations.txt")
            sys.exit(1)

    filename_base = "SumatraPDF-%s" % ver
    if build_prerelease:
        filename_base = "SumatraPDF-prerelease-%s" % ver

    s3_dir = "sumatrapdf/rel"
    if build_prerelease:
        s3_dir = "sumatrapdf/prerel"
    if upload_tmp:
        upload = True
        s3_dir += "tmp"

    if upload:
        log("Will upload to s3 at %s" % s3_dir)
        conf = load_config()
        s3.set_secrets(conf.aws_access, conf.aws_secret)
        s3.set_bucket("kjkpub")

    s3_prefix = "%s/%s" % (s3_dir, filename_base)
    s3_exe = s3_prefix + ".exe"
    s3_installer = s3_prefix + "-install.exe"
    s3_pdb_lzsa = s3_prefix + ".pdb.lzsa"
    s3_pdb_zip = s3_prefix + ".pdb.zip"
    s3_exe_zip = s3_prefix + ".zip"

    s3_files = [s3_exe, s3_installer, s3_pdb_lzsa, s3_pdb_zip]
    if not build_prerelease:
        s3_files.append(s3_exe_zip)

    cert_pwd = None
    cert_path = os.path.join("scripts", "cert.pfx")
    if upload:
        map(s3.verify_doesnt_exist, s3_files)
        verify_path_exists(cert_path)
        conf = load_config()
        cert_pwd = conf.GetCertPwdMustExist()

    obj_dir = "obj-rel"
    if target_platform == "X64":
        obj_dir += "64"

    if not testing and not build_test_installer and not build_rel_installer:
        shutil.rmtree(obj_dir, ignore_errors=True)
        shutil.rmtree(os.path.join("mupdf", "generated"), ignore_errors=True)

    config = "CFG=rel"
    if build_test_installer and not build_prerelease:
        obj_dir = "obj-dbg"
        config = "CFG=dbg"
    extcflags = ""
    if build_prerelease:
        extcflags = "EXTCFLAGS=-DSVN_PRE_RELEASE_VER=%s" % ver
    platform = "PLATFORM=%s" % (target_platform or "X86")

    # build executables for signing (building the installer will build the rest)
    (out, err) = run_cmd_throw("nmake", "-f", "makefile.msvc",
                               config, extcflags, platform,
                               "SumatraPDF", "Uninstaller")
    if build_test_installer:
        print_run_resp(out, err)

    exe = os.path.join(obj_dir, "SumatraPDF.exe")
    sign_retry(exe, cert_pwd)
    sign_retry(os.path.join(obj_dir, "SumatraPDF-no-MuPDF.exe"), cert_pwd)
    sign_retry(os.path.join(obj_dir, "uninstall.exe"), cert_pwd)

    (out, err) = run_cmd_throw("nmake", "-f", "makefile.msvc",
                               "Installer", config, platform, extcflags)
    if build_test_installer:
        print_run_resp(out, err)

    if build_test_installer or build_rel_installer:
        sys.exit(0)

    installer = os.path.join(obj_dir, "Installer.exe")
    sign_retry(installer, cert_pwd)

    pdb_lzsa_archive = create_pdb_lzsa_archive(obj_dir, "%s.pdb.lzsa" % filename_base)
    pdb_zip_archive = create_pdb_zip_archive(obj_dir, "%s.pdb.zip" % filename_base)

    builds_dir = os.path.join("builds", ver)
    if os.path.exists(builds_dir):
        shutil.rmtree(builds_dir)
    os.makedirs(builds_dir)

    copy_to_dst_dir(exe, builds_dir)
    copy_to_dst_dir(installer, builds_dir)
    copy_to_dst_dir(pdb_lzsa_archive, builds_dir)
    copy_to_dst_dir(pdb_zip_archive, builds_dir)

    # package portable version in a .zip file
    if not build_prerelease:
        exe_zip_name = "%s.zip" % filename_base
        zip_one_file(obj_dir, "SumatraPDF.exe", exe_zip_name)
        exe_zip_path = os.path.join(obj_dir, exe_zip_name)
        copy_to_dst_dir(exe_zip_path, builds_dir)

    if not upload:
        return

    if build_prerelease:
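        # sumatralatest.js records the latest pre-release version, build date and
        # download URLs; presumably it is consumed by the project's download page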
        jstxt = 'var sumLatestVer = %s;\n' % ver
        jstxt += 'var sumBuiltOn = "%s";\n' % time.strftime("%Y-%m-%d")
        jstxt += 'var sumLatestName = "%s";\n' % s3_exe.split("/")[-1]
        jstxt += 'var sumLatestExe = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_exe
        jstxt += 'var sumLatestPdb = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_pdb_zip
        jstxt += 'var sumLatestInstaller = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_installer

    s3.upload_file_public(installer, s3_installer)
    s3.upload_file_public(pdb_lzsa_archive, s3_pdb_lzsa)
    s3.upload_file_public(pdb_zip_archive, s3_pdb_zip)
    s3.upload_file_public(exe, s3_exe)

    if build_prerelease:
        s3.upload_data_public(jstxt, "sumatrapdf/sumatralatest.js")
        # don't set a Stable version for prerelease builds
        txt = "[SumatraPDF]\nLatest %s\n" % ver
        s3.upload_data_public(txt, "sumatrapdf/sumpdf-prerelease-update.txt")
        # keep updating the legacy file for now
        txt = "%s\n" % ver
        s3.upload_data_public(txt, "sumatrapdf/sumpdf-prerelease-latest.txt")
        delete_old_pre_release_builds()
    else:
        # update the Latest version for manual update checks but
        # leave the Stable version for automated update checks
        update_url = "http://kjkpub.s3.amazonaws.com/sumatrapdf/sumpdf-update.txt"
        ver_stable = get_stable_version(update_url, "2.5.2")
        s3.upload_file_public(exe_zip_path, s3_exe_zip)
        s3.upload_data_public("[SumatraPDF]\nLatest %s\nStable %s\n" % (ver, ver_stable), "sumatrapdf/sumpdf-update.txt")

    if not build_prerelease:
        svn_tag_release(ver)
        upload_sources.upload(ver)
Exemple #57
0
def main():
  global upload
  if len(args) != 0:
    usage()
  verify_started_in_right_directory()

  if build_prerelease:
    if svn_revision is None:
      run_cmd_throw("svn", "update")
      (out, err) = run_cmd_throw("svn", "info")
      ver = str(parse_svninfo_out(out))
    else:
      # allow passing in an SVN revision, in case SVN itself isn't available
      ver = svn_revision
  else:
    ver = extract_sumatra_version(os.path.join("src", "Version.h"))
  log("Version: '%s'" % ver)

  # don't update translations for release versions to prevent Trunk changes
  # from messing up the compilation of a point release on a branch
  if g_new_translation_system and build_prerelease and not skip_transl_update:
    trans_upload.uploadStringsIfChanged()
    changed = trans_download.downloadAndUpdateTranslationsIfChanged()
    # Note: this is not a perfect check, since re-running the script will
    # proceed past it
    if changed:
      print("\nNew translations have been downloaded from apptranslator.og")
      print("Please verify and checkin src/Translations_txt.cpp and strings/translations.txt")
      sys.exit(1)

  filename_base = "SumatraPDF-%s" % ver
  if build_prerelease:
    filename_base = "SumatraPDF-prerelease-%s" % ver

  s3_dir = "sumatrapdf/rel"
  if build_prerelease:
    s3_dir = "sumatrapdf/prerel"
  if upload_tmp:
    upload = True
    s3_dir += "tmp"

  if upload:
    log("Will upload to s3 at %s" % s3_dir)
    conf = load_config()
    s3.set_secrets(conf.aws_access, conf.aws_secret)
    s3.set_bucket("kjkpub")

  s3_prefix = "%s/%s" % (s3_dir, filename_base)
  s3_exe           = s3_prefix + ".exe"
  s3_installer     = s3_prefix + "-install.exe"
  s3_pdb_zip       = s3_prefix + ".pdb.zip"
  s3_exe_zip       = s3_prefix + ".zip"

  s3_files = [s3_exe, s3_installer, s3_pdb_zip]
  if not build_prerelease:
    s3_files.append(s3_exe_zip)

  cert_pwd = None
  cert_path = os.path.join("scripts", "cert.pfx")
  if upload:
    map(s3.verify_doesnt_exist, s3_files)
    verify_path_exists(cert_path)
    conf = load_config()
    cert_pwd = conf.GetCertPwdMustExist()

  obj_dir = "obj-rel"
  if target_platform == "X64":
    obj_dir += "64"

  if not testing and not build_test_installer and not build_rel_installer:
    shutil.rmtree(obj_dir, ignore_errors=True)
    shutil.rmtree(os.path.join("mupdf", "generated"), ignore_errors=True)

  config = "CFG=rel"
  if build_test_installer and not build_prerelease:
    obj_dir = "obj-dbg"
    config = "CFG=dbg"
  extcflags = ""
  if build_prerelease:
    extcflags = "EXTCFLAGS=-DSVN_PRE_RELEASE_VER=%s" % ver
  platform = "PLATFORM=%s" % (target_platform or "X86")

  run_cmd_throw("nmake", "-f", "makefile.msvc", config, extcflags, platform, "all_sumatrapdf")
  exe = os.path.join(obj_dir, "SumatraPDF.exe")
  if upload:
    sign(exe, cert_pwd)
    sign(os.path.join(obj_dir, "uninstall.exe"), cert_pwd)

  build_installer_data(obj_dir)
  run_cmd_throw("nmake", "-f", "makefile.msvc", "Installer", config, platform, extcflags)

  if build_test_installer or build_rel_installer:
    sys.exit(0)

  installer = os.path.join(obj_dir, "Installer.exe")
  if upload:
    sign(installer, cert_pwd)

  pdb_zip = os.path.join(obj_dir, "%s.pdb.zip" % filename_base)

  zip_file(pdb_zip, os.path.join(obj_dir, "libmupdf.pdb"))
  zip_file(pdb_zip, os.path.join(obj_dir, "Installer.pdb"), append=True)
  zip_file(pdb_zip, os.path.join(obj_dir, "SumatraPDF-no-MuPDF.pdb"), append=True)
  zip_file(pdb_zip, os.path.join(obj_dir, "SumatraPDF.pdb"), append=True)

  builds_dir = os.path.join("builds", ver)
  if os.path.exists(builds_dir):
    shutil.rmtree(builds_dir)
  os.makedirs(builds_dir)

  copy_to_dst_dir(exe, builds_dir)
  copy_to_dst_dir(installer, builds_dir)
  copy_to_dst_dir(pdb_zip, builds_dir)

  if not build_prerelease:
    exe_zip = os.path.join(obj_dir, "%s.zip" % filename_base)
    zip_file(exe_zip, exe, "SumatraPDF.exe", compress=True)
    verify_path_exists(exe_zip)
    copy_to_dst_dir(exe_zip, builds_dir)

  if not upload: return

  if build_prerelease:
    jstxt  = 'var sumLatestVer = %s;\n' % ver
    jstxt += 'var sumBuiltOn = "%s";\n' % time.strftime("%Y-%m-%d")
    jstxt += 'var sumLatestName = "%s";\n' % s3_exe.split("/")[-1]
    jstxt += 'var sumLatestExe = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_exe
    jstxt += 'var sumLatestPdb = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_pdb_zip
    jstxt += 'var sumLatestInstaller = "http://kjkpub.s3.amazonaws.com/%s";\n' % s3_installer

  s3.upload_file_public(installer, s3_installer)
  s3.upload_file_public(pdb_zip, s3_pdb_zip)
  s3.upload_file_public(exe, s3_exe)

  if build_prerelease:
    s3.upload_data_public(jstxt, "sumatrapdf/sumatralatest.js")
    txt = "%s\n" % ver
    s3.upload_data_public(txt, "sumatrapdf/sumpdf-prerelease-latest.txt")
    deleteOldPreReleaseBuilds()
  else:
    s3.upload_file_public(exe_zip, s3_exe_zip)
Exemple #58
0
def main():
    global SCREEN_FULLSCREEN
    pygame.init()

    util.load_config()

    if len(sys.argv) > 1:
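        # recognized switches: -np no particles, -na no alpha blending,
        # -nm no music, -ns no sound, -f fullscreen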
        for arg in sys.argv:
            if arg == "-np":
                Variables.particles = False
            elif arg == "-na":
                Variables.alpha = False
            elif arg == "-nm":
                Variables.music = False
            elif arg == "-ns":
                Variables.sound = False
            elif arg == "-f":
                SCREEN_FULLSCREEN = True

    scr_options = 0
    if SCREEN_FULLSCREEN: scr_options += FULLSCREEN
    screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT), scr_options, 32)

    pygame.display.set_icon(util.load_image("kuvake"))
    pygame.display.set_caption("Trip on the Funny Boat")

    init()

    joy = None
    if pygame.joystick.get_count() > 0:
        joy = pygame.joystick.Joystick(0)
        joy.init()

    try:
        util.load_music("JDruid-Trip_on_the_Funny_Boat")
        if Variables.music:
            pygame.mixer.music.play(-1)
    except:
        # It's not a critical problem if there's no music
        pass

    pygame.time.set_timer(NEXTFRAME, 1000 / FPS) # 30 fps

    Water.global_water = Water()

    main_selection = 0

    while True:
        main_selection = Menu(screen, ("New Game", "High Scores", "Options", "Quit"), main_selection).run()
        if main_selection == 0:
            # New Game
            selection = Menu(screen, ("Story Mode", "Endless Mode")).run()
            if selection == 0:
                # Story
                score = Game(screen).run()
                Highscores(screen, score).run()
            elif selection == 1:
                # Endless
                score = Game(screen, True).run()
                Highscores(screen, score, True).run()
        elif main_selection == 1:
            # High Scores
            selection = 0
            while True:
                selection = Menu(screen, ("Story Mode", "Endless Mode", "Endless Online"), selection).run()
                if selection == 0:
                    # Story
                    Highscores(screen).run()
                elif selection == 1:
                    # Endless
                    Highscores(screen, endless = True).run()
                elif selection == 2:
                    # Online
                    Highscores(screen, endless = True, online = True).run()
                else:
                    break
        elif main_selection == 2:
            # Options
            selection = Options(screen).run()
        else: #if main_selection == 3:
            # Quit
            return
Exemple #59
0
                user_id = data.get('user', None)
                if user_id:
                    user = bot.slack_client.server.users.find(user_id)
                else:
                    user = None
                user  # usable, but not yet
                if message:
                    line = u'privmsg #{} :{}'.format(name(channel), message)
                    self.irc_client.send_line(line)
            else:
                line = u'privmsg #{} :{}'.format(self.config['irc'].get('nick', 'slairck'), unicode(data))
                self.irc_client.send_line(line)

if __name__ == "__main__":
    from util import main_loop

    config = load_config('irc')
    debug = config["DEBUG"]
    host = config['irc']['host']
    port = config['irc'].get('port', 6667)
    bot = IrcBot(host, port, config=config)
    site_plugins = []
    files_currently_downloading = []
    job_hash = {}

    if config.get('DAEMON', None):
        # run in the background as a daemon when DAEMON is set in the config
        import daemon
        with daemon.DaemonContext():
            main_loop(bot, config)
    else:
        main_loop(bot, config)
Exemple #60
0
def main(models, source_file, saveto, save_alignment=None, k=5,
         normalize=False, n_process=5, chr_level=False, verbose=False, nbest=False, suppress_unk=False, a_json=False, print_word_probabilities=False, return_hyp_graph=False):
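    # Translate source_file with an ensemble of models and write the best (or
    # n-best) hypotheses to saveto; translation is distributed over n_process workers.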
    # load model options for each model
    options = []
    for model in models:
        options.append(load_config(model))

        fill_options(options[-1])

    dictionaries = options[0]['dictionaries']

    dictionaries_source = dictionaries[:-1]
    dictionary_target = dictionaries[-1]

    # load source dictionary and invert
    word_dicts = []
    word_idicts = []
    for dictionary in dictionaries_source:
        word_dict = load_dict(dictionary)
        if options[0]['n_words_src']:
            for key, idx in word_dict.items():
                if idx >= options[0]['n_words_src']:
                    del word_dict[key]
        word_idict = dict()
        for kk, vv in word_dict.iteritems():
            word_idict[vv] = kk
        word_idict[0] = '<eos>'
        word_idict[1] = 'UNK'
        word_dicts.append(word_dict)
        word_idicts.append(word_idict)

    # load target dictionary and invert
    word_dict_trg = load_dict(dictionary_target)
    word_idict_trg = dict()
    for kk, vv in word_dict_trg.iteritems():
        word_idict_trg[vv] = kk
    word_idict_trg[0] = '<eos>'
    word_idict_trg[1] = 'UNK'

    # create input and output queues for processes
    queue = Queue()
    rqueue = Queue()
    processes = [None] * n_process
    for midx in xrange(n_process):
        processes[midx] = Process(
            target=translate_model,
            args=(queue, rqueue, midx, models, options, k, normalize, verbose, nbest, save_alignment is not None, suppress_unk, return_hyp_graph))
        processes[midx].start()

    # utility function
    def _seqs2words(cc):
        ww = []
        for w in cc:
            if w == 0:
                break
            ww.append(word_idict_trg[w])
        return ' '.join(ww)

    def _send_jobs(f):
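        # tokenize each input line, map '|'-separated factors to vocabulary ids
        # (unknown tokens become 1/UNK) and push (index, sequence) jobs onto the queue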
        source_sentences = []
        for idx, line in enumerate(f):
            if chr_level:
                words = list(line.decode('utf-8').strip())
            else:
                words = line.strip().split()

            x = []
            for w in words:
                w = [word_dicts[i][f] if f in word_dicts[i] else 1 for (i,f) in enumerate(w.split('|'))]
                if len(w) != options[0]['factors']:
                    sys.stderr.write('Error: expected {0} factors, but input word has {1}\n'.format(options[0]['factors'], len(w)))
                    for midx in xrange(n_process):
                        processes[midx].terminate()
                    sys.exit(1)
                x.append(w)

            x += [[0]*options[0]['factors']]
            queue.put((idx, x))
            source_sentences.append(words)
        return idx+1, source_sentences

    def _finish_processes():
        for midx in xrange(n_process):
            queue.put(None)

    def _retrieve_jobs(n_samples):
        trans = [None] * n_samples
        out_idx = 0
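        # collect results from the workers and yield translations in input order
        # as soon as all preceding sentences have finished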
        for idx in xrange(n_samples):
            resp = rqueue.get()
            trans[resp[0]] = resp[1]
            if verbose and numpy.mod(idx, 10) == 0:
                sys.stderr.write('Sample {0} / {1} Done\n'.format((idx+1), n_samples))
            while out_idx < n_samples and trans[out_idx] != None:
                yield trans[out_idx]
                out_idx += 1

    sys.stderr.write('Translating {0} ...\n'.format(source_file.name))
    n_samples, source_sentences = _send_jobs(source_file)
    _finish_processes()

    for i, trans in enumerate(_retrieve_jobs(n_samples)):
        if nbest:
            samples, scores, word_probs, alignment, hyp_graph = trans
            if return_hyp_graph:
                renderer = HypGraphRenderer(hyp_graph)
                renderer.wordify(word_idict_trg)
                renderer.save_png(return_hyp_graph, detailed=True, highlight_best=True)
            order = numpy.argsort(scores)
            for j in order:
                if print_word_probabilities:
                    probs = " ||| " + " ".join("{0}".format(prob) for prob in word_probs[j])
                else:
                    probs = ""
                saveto.write('{0} ||| {1} ||| {2}{3}\n'.format(i, _seqs2words(samples[j]), scores[j], probs))
                # print alignment matrix for each hypothesis
                # header: sentence id ||| translation ||| score ||| source ||| source_token_count+eos translation_token_count+eos
                if save_alignment is not None:
                  if a_json:
                    print_matrix_json(alignment[j], source_sentences[i], _seqs2words(samples[j]).split(), i, i+j,save_alignment)
                  else:
                    save_alignment.write('{0} ||| {1} ||| {2} ||| {3} ||| {4} {5}\n'.format(
                                        i, _seqs2words(samples[j]), scores[j], ' '.join(source_sentences[i]) , len(source_sentences[i])+1, len(samples[j])))
                    print_matrix(alignment[j], save_alignment)
        else:
            samples, scores, word_probs, alignment, hyp_graph = trans
            if return_hyp_graph:
                renderer = HypGraphRenderer(hyp_graph)
                renderer.wordify(word_idict_trg)
                renderer.save_png(return_hyp_graph, detailed=True, highlight_best=True)
            saveto.write(_seqs2words(samples) + "\n")
            if print_word_probabilities:
                for prob in word_probs:
                    saveto.write("{} ".format(prob))
                saveto.write('\n')
            if save_alignment is not None:
              if a_json:
                print_matrix_json(alignment, source_sentences[i], _seqs2words(trans[0]).split(), i, i,save_alignment)
              else:
                save_alignment.write('{0} ||| {1} ||| {2} ||| {3} ||| {4} {5}\n'.format(
                                      i, _seqs2words(trans[0]), 0, ' '.join(source_sentences[i]) , len(source_sentences[i])+1, len(trans[0])))
                print_matrix(alignment, save_alignment)

    sys.stderr.write('Done\n')