Example #1
def run_kmeans(X, plot=False):
    """ My solution:
    for i in range(len(K)):
        for j in range(len(seed)):
            mixture, post = common.init(X, K[i], seed[j])
            mixture, post, cost = kmeans.run(X, mixture, post)
            print("K = {}, seed = {}, cost = {}".format(K[i], seed[j], cost))
            if plot:
                common.plot(X, mixture, post, "K={}, seed={}".format(K[i], seed[j]))
    """
    # Instructor's solution:
    for K in range(1, 5):
        min_cost = None
        best_seed = None
        for seed in range(0, 5):
            mixture, post = common.init(X, K, seed)
            mixture, post, cost = kmeans.run(X, mixture, post)
            if min_cost is None or cost < min_cost:
                min_cost = cost
                best_seed = seed

        mixture, post = common.init(X, K, best_seed)
        mixture, post, cost = kmeans.run(X, mixture, post)
        title = "K-means for K=, seed=, cost=".format(K, best_seed, min_cost)
        print(title)
        common.plot(X, mixture, post, title)
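The commented-out "My solution" in Example #1 iterates over K and seed as predefined sequences and assumes X is already loaded; a hypothetical driver for it (file name and values assumed, not taken from the original file) could be:

import numpy as np
import common
import kmeans

X = np.loadtxt("toy_data.txt")   # assumed name of the toy data file
K = [1, 2, 3, 4]                 # cluster counts to sweep
seed = [0, 1, 2, 3, 4]           # RNG seeds to sweep
run_kmeans(X)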
Example #2
File: main.py Project: tritus/ml-courses
def test_seeds(X, K):
    print("\n############## KMEAN K=" + str(K) + " ###############")

    mixture0, post0 = common.init(X, K, 0)
    mixture1, post1 = common.init(X, K, 1)
    mixture2, post2 = common.init(X, K, 2)
    mixture3, post3 = common.init(X, K, 3)
    mixture4, post4 = common.init(X, K, 4)

    cost0 = kmeans.run(X, mixture0, post0)[2]
    cost1 = kmeans.run(X, mixture1, post1)[2]
    cost2 = kmeans.run(X, mixture2, post2)[2]
    cost3 = kmeans.run(X, mixture3, post3)[2]
    cost4 = kmeans.run(X, mixture4, post4)[2]

    print("K=" + str(K) + " seed=0 : cost=" + str(cost0))
    print("K=" + str(K) + " seed=1 : cost=" + str(cost1))
    print("K=" + str(K) + " seed=2 : cost=" + str(cost2))
    print("K=" + str(K) + " seed=3 : cost=" + str(cost3))
    print("K=" + str(K) + " seed=4 : cost=" + str(cost4))

    naive_em_estimate0 = naive_em.run(X, mixture0, post0)
    naive_em_estimate1 = naive_em.run(X, mixture1, post1)
    naive_em_estimate2 = naive_em.run(X, mixture2, post2)
    naive_em_estimate3 = naive_em.run(X, mixture3, post3)
    naive_em_estimate4 = naive_em.run(X, mixture4, post4)

    print("K=" + str(K) + " seed=0 : likelihood=" + str(naive_em_estimate0[2]))
    print("K=" + str(K) + " seed=1 : likelihood=" + str(naive_em_estimate1[2]))
    print("K=" + str(K) + " seed=2 : likelihood=" + str(naive_em_estimate2[2]))
    print("K=" + str(K) + " seed=3 : likelihood=" + str(naive_em_estimate3[2]))
    print("K=" + str(K) + " seed=4 : likelihood=" + str(naive_em_estimate4[2]))
Example #3
 def test(self):
     try:
         this_dir = os.path.dirname(os.path.abspath(__file__))
         init(visualiser_args=['--maps', '{}/RRT_test_map_2D.py'.format(this_dir)])
         graphics_test()
     finally:
         destroy()
Example #4
def run():
    """
    :param app_name: HPC application
    :param perf_coln: performance name to be optimized
    :param num_core: number of CPU cores
    :param num_node: number of computing nodes
    :param rand_seed: random seed
    :param num_smpl: number of samples
    :param pool_size: pool size
    :param num_iter: number of iterations
    :param prec_rand: percentage of random samples
    """
    try:
        cm.init()
        app_name = cm.app_name
        perf_coln = cm.perf_coln
        num_smpl = cm.num_smpl
        pool_size = cm.pool_size
        num_iter = cm.num_iter
        prec_rand = cm.prec_rand
    
        if (app_name == "lv"):
            conf_colns = data.lv_conf_colns 
        elif (app_name == "hs"):
            conf_colns = data.hs_conf_colns

        num_rand = int(num_smpl * prec_rand)
        nspi = int((num_smpl - num_rand) / num_iter)
        # pool_df = data.gen_smpl(app_name, pool_size)
        # conf_df = pool_df.head(num_rand)
        conf_df = data.gen_smpl(app_name, num_rand)
        train_df = cm.measure_perf(conf_df)

        for iter_idx in range(num_iter):
            num_curr = num_smpl - nspi * (num_iter - 1 - iter_idx)
 
            pool_df = data.gen_smpl(app_name, pool_size)
            pred_top_smpl = learn.whl_pred_top_eval(train_df, pool_df, conf_colns, perf_coln, num_smpl, 0)
            pred_top_smpl = pred_top_smpl.sort_values([perf_coln]).reset_index(drop=True)
            new_conf_df = pred_top_smpl[conf_colns].head(nspi)
            conf_df = tool.df_union(conf_df, new_conf_df) 
    
            last = nspi
            while (conf_df.shape[0] < num_curr):
                last = last + 1
                new_conf_df = pred_top_smpl[conf_colns].head(last)
                conf_df = tool.df_union(conf_df, new_conf_df)
    
            new_train_df = cm.measure_perf(new_conf_df)
            train_df = tool.df_union(train_df, new_train_df)
    
        data.df2csv(train_df, app_name + "_train.csv")
        mdl_chk, mdl = learn.train_mdl_chk(train_df, conf_colns, perf_coln)
        top_df = cm.find_top('ALe', (mdl_chk, mdl, ), conf_colns, perf_coln, train_df)
    
        cm.test(train_df, conf_colns, perf_coln)
        cm.finish(train_df, top_df)
    except:
        traceback.print_exc()
Example #5
 def test_common_init(self):
     layer_widths=[32, 16, 8, 7, 6, 5, 4, 3, 2, 1]
     centroids=   [2,   8, 32,32,64,16,16,16,16,4]
     cm.init(centroids=centroids,
             video_file="moving_square.avi",
             learn_rate=0.05,
             layer_widths=layer_widths,
             img_width=512
             )
Example #6
    def __init__(self, parent):
        """Initializes the Background tab.

        Args:
            parent (App(QDialog)): Object corresponding to the parent UI element.
        """
        self.parent = parent
        self.tag = "bg"
        self.dlg = parent.dlg
        common.init(self)
Example #7
    def __init__(self, parent):
        """Initializes the DepthEstimation tab.

        Args:
            parent (App(QDialog)): Object corresponding to the parent UI element.
        """
        self.parent = parent
        self.tag = "depth"
        self.dlg = parent.dlg
        common.init(self)
Example #8
    def __init__(self, parent):
        """Initializes the Export tab.

        Args:
            parent (App(QDialog)): Object corresponding to the parent UI element.
        """
        self.parent = parent
        self.tag = "export"
        self.dlg = parent.dlg
        common.init(self)
        self.initialize_viewer_buttons()
Example #9
File: main.py Project: SYYoung/MIT
def run_kmeans():
    for K in range(1, 5):
        min_cost = None
        best_seed = None
        for seed in range(0, 5):
            mixture, post = common.init(X, K, seed)
            mixture, post, cost = kmeans.run(X, mixture, post)
            if min_cost is None or cost < min_cost:
                min_cost = cost
                best_seed = seed

        mixture, post = common.init(X, K, best_seed)
        mixture, post, cost = kmeans.run(X, mixture, post)
        title = "K-means for K={}, seed={} , cost= {}".format(K, best_seed, min_cost)
        common.plot(X, mixture, post, title)
def run_kmean(X):
    for K in [1,2,3,4]:
        cost_list = []
        for seed in range(5):
            mixture, post = common.init(X, K, seed)
            mixture, post, cost = kmeans.run(X, mixture, post)
            cost_list.append(cost)
            #common.plot(X, mixture, post, "{} means with seed{}".format(K, seed))
        print("The cost of {} cluster is".format(K), min(cost_list))
        best_seed = np.argmin(cost_list)
        for seed_ in [best_seed]:
            mixture, post = common.init(X, K, int(seed_))
            mixture, post, cost = kmeans.run(X, mixture, post)
            common.plot(X, mixture, post, "{} means with seed{}".format(K, seed_))
    return "Done"
Example #11
def main():

	print("Booting")

	common.init()

	telegram_bot_token=""
	twitter_consumer_key=""
	twitter_consumer_secret=""
	twitter_access_token=""
	tiwtter_access_secret=""

	# READ API TOKENS FROM FILE
	with open('tokens.json') as token_file:
		token_data = json.load(token_file)
		telegram_bot_token = token_data['telegram_bot_token']
		twitter_consumer_key = token_data['consumer_key']
		twitter_consumer_secret = token_data['consumer_secret']
		twitter_access_token = token_data['access_token']
		twitter_access_secret = token_data['access_token_secret']

	follow_list = []
	with open('follow_list.json') as follow_list_file:
		follow_data = json.load(follow_list_file)
		follow_list = follow_data['follow_list']

	#AUTH
	auth = tweepy.OAuthHandler(twitter_consumer_key, twitter_consumer_secret)
	auth.set_access_token(twitter_access_token, twitter_access_secret)

	api = tweepy.API(auth)

	follow_list_ids = []
	for item in follow_list:
		user = api.get_user(item)
		follow_list_ids.append(str(user.id))

	stream = ""
	try:
		#SET UP STREAM
		streamListener = TweetsStreamListener()
		stream = tweepy.Stream(auth=api.auth, listener=streamListener)
		stream.filter(follow=follow_list_ids, async=True)
		bot.bot_main(telegram_bot_token)
	except KeyboardInterrupt:
		print("KeyboardInterrupt")
		stream.disconnect()
		print("Disconnected from stream")
def main():
    global path

    logger = common.configLogger()
    common.init(logger)

    try:
        if "/build" not in path:
            msg = ("ERROR: exec_command() -"
                   " build all faild.\n\nOutput msg:\n{}")
            msgf = msg.format(path)
            logger.info(msgf)
            raise AssertionError(msgf)
        bakeSrcCode(logger)
    except:
        logger.info(traceback.print_exc())
Example #13
def main():
    common.parser().add_option("--indb", dest="indb", help="Input database")

    if not common.init(__file__):
        exit(2)

    progname = common.progname()
    indb = common.options().indb
    if not indb:
        indb = common.testdir() + "/pgn/Kramnik.pgn"
    temppgn1 = common.tmpdir() + "/copydb1.pgn"
    temppgn2 = common.tmpdir() + "/copydb2.pgn"
    tempcfdb1 = common.tmpdir() + "/copydb1.cfdb"

    if run(1, indb, tempcfdb1) and run(2, tempcfdb1, temppgn1) and run(3, indb, temppgn2):
        # temppgn1 and temppgn2 should be identical
        if os.path.exists(temppgn1) and os.path.exists(temppgn2):
            lines1 = open(temppgn1, "U").readlines()
            lines2 = open(temppgn2, "U").readlines()
            diff = difflib.unified_diff(lines1, lines2)
            different = False
            first = True
            for line in diff:
                if first:
                    print "{0}: PGN files {1} and {2} are different:".format(progname, temppgn1, temppgn2)
                    different = True
                    first = False
                print line
            if not different:
                print "{0}: Test successful".format(progname)
        else:
            print "{0}: Failed; no output files to compare".format(progname)
Example #14
def r_tag(guid):
	client = init()
	tag = client.get_tag(guid)
	if not tag: abort(404)
	data = globaldata()
	data.tag = tag
	
	if data.user:
		modify_tag(client, data)
	
	data.cloud = tagcloud([guid])
	data.q = tag.name
	data.tagtypes = client.metalist(u"tagtypes")
	
	def get_impl(rev):
		res = []
		for i in client.tag_implies(guid, reverse=rev) or []:
			name = tagname(i.guid)
			res.append(ImplicationTupleWithName(*(i + (name,))))
		return res
	
	data.implies_tags = get_impl(False)
	data.implied_by_tags = get_impl(True)
	
	order = "group" if tag.ordered else "aaaaaa-aaaac8-faketg-bddate"
	props = DotDict()
	posts = client.search_post(guids=[guid], order=order, range=[0, per_page - 1], wanted=wanted, props=props)
	data.posts = posts
	data.result_count = props.result_count
	data.page = 0
	if posts:
		data.pagelink = makelink(u'search', (u'q', tag.name))
		data.pages, data.rels = pagelinks(data.pagelink, 0, data.result_count)
	return data
Example #15
def setupCommon():
    from datetime import datetime
    bottle.request.session = bottle.request.environ['beaker.session']
    try:
        user = aaa.current_user.username
    except:
        user = '******'
    workDB = bottle.request.query.workDB
    if (not workDB) and ('workDB' in bottle.request.session):
        workDB = bottle.request.session['workDB']
    matchDB = bottle.request.query.matchDB
    if (not matchDB) and ('matchDB' in bottle.request.session):
        matchDB = bottle.request.session['matchDB']
    if workDB:
        #CHECK: possibly store mongoClient in session? Param to init?
        common.config = common.init(workDB, matchDBName = matchDB)
        bottle.request.session['workDB'] = workDB
        bottle.request.session['matchDB'] = matchDB
        bottle.request.session.save()
        if conf.config.logging and ('action' in bottle.request.url or
                                    'runProg' in bottle.request.url):
            rec = {'type': 'admin', 'time': time.time(), 'workDB': workDB,
                   'matchDB': matchDB, 'url': bottle.request.url,
                   'from': bottle.request.remote_addr, 'user': user}
            common.config['originalData'].insert_one(rec)
    bottle.response.set_header("Cache-Control", "no-cache")
    #print user, bottle.request.remote_addr, str(datetime.now()), bottle.request.url
    logging.info('%s %s %s', user, bottle.request.remote_addr, bottle.request.url)
Example #17
def test_em():
    init_mixture, post = common.init(X, K, seed)
    mixture, post, c = em.run(X, init_mixture, post)

    prediction = em.fill_matrix(X, mixture)
    print(c)
    print(common.rmse(prediction, X_gold))
def run_naive_em(X):
    for K in [1,2,3,4]:
        likelihood_ls = []
        for seed in range(5):
            mixture, post = common.init(X, K, seed)
            mixture, post, LL = naive_em.run(X,mixture,post)
            likelihood_ls.append(LL)


        print("The likelihood of {} cluster is".format(K), max(likelihood_ls))
        best_seed = np.argmax(likelihood_ls)
        for seed_ in [best_seed]:
            mixture, post = common.init(X, K, int(seed_))
            mixture, post, LL= naive_em.run(X, mixture, post)
            common.plot(X, mixture, post, "{} mixtures with seed{}".format(K, seed_))
    return "Done"
Example #19
def run_naive_em():
    for K in range(1, 5):
        max_ll = None
        best_seed = None
        for seed in range(0, 5):
            mixture, post = common.init(X, K, seed)
            mixture, post, ll = naive_em.run(X, mixture, post)
            if max_ll is None or ll > max_ll:
                max_ll = ll
                best_seed = seed

        mixture, post = common.init(X, K, best_seed)
        mixture, post, ll = naive_em.run(X, mixture, post)
        title = "EM for K={}, seed={}, ll={}".format(K, best_seed, ll)
        print(title)
        common.plot(X, mixture, post, title)
Example #20
File: epd.py Project: gkalab/ChessCore
def main():
    common.parser().add_option("--epdfile", dest = "epdfile", help = "Input EPD file")
    common.parser().add_option("--timecontrol", dest="timecontrol", default = "10s", help = "Time control")
    common.parser().add_option("--engine", dest="engine", help = "The engine to use to process the EPD file")

    if not common.init(__file__):
        exit(2)

    progname = common.progname()
    debuglog = True
    logcomms = False
    logfile = common.tmpdir() + "/ccore.log"
    configfile = common.configfile()
    epdfile = common.options().epdfile
    if not epdfile:
        usage()
    timecontrol = common.options().timecontrol
    engine = common.options().engine
    if not engine:
        engine = common.engine1()

    if os.path.exists(logfile):
        os.remove(logfile)

    # Take a copy of the database
    cmdline = common.ccore()
    if debuglog:
        cmdline += " --debuglog true"
    if logcomms:
        cmdline += " --logcomms true"
    cmdline += " -c {0} -l {1} -e {2} -t {3} processepd {4}".format(configfile, logfile, epdfile, timecontrol, engine);
    if common.runccore(cmdline):
        common.checkLogfile(logfile)
Example #21
def train():
    args = init()
    global_epoch = args.init_epoch
    global_steps = 0
    gen_path = args.outpath

    if not isdir(gen_path):
        mkdir(gen_path)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        train_writer = tf.summary.FileWriter(args.model_root, sess.graph)

        for e in range(args.init_epoch, args.epochs):
            global_epoch += 1
            for i, (data, y) in enumerate(
                    data_generator(args.data_root, args.label_path)):
                if y.size == 0:
                    continue

                #data[np.where(data < 255)] = 0.
                #data = data.astype(np.float32)
                data = data.reshape(batch, size)

                _, _dis_loss, summary = sess.run([opt_dis, dis_loss, merged],
                                                 feed_dict={
                                                     real_x:
                                                     data,
                                                     gen_data:
                                                     rand_sample([batch, size])
                                                 })
                train_writer.add_summary(summary, global_steps)

                _, _gen_loss, summary = sess.run([opt_gen, gen_loss, merged],
                                                 feed_dict={
                                                     real_x:
                                                     data,
                                                     gen_data:
                                                     rand_sample([batch, size])
                                                 })
                train_writer.add_summary(summary, global_steps)

                if _dis_loss is None or _gen_loss is None:
                    print('[{}/{}] dis_loss:{} gen_loss:{}'.format(
                        global_epoch, i + 1, _dis_loss, _gen_loss),
                          flush=True)
                    return

                print('[{}/{}] dis_loss:{:.4} gen_loss:{:.4}'.format(
                    global_epoch, i + 1, _dis_loss, _gen_loss),
                      flush=True)

                sample = sess.run(
                    fake_x, feed_dict={gen_data: rand_sample([batch, size])})

                ind = i % gen_sample_nr
                i // gen_sample_nr and np.save('{}/{}'.format(gen_path, ind),
                                               sample) or None
                global_steps += 1
Example #22
def run_matrix_completion():
    K = 12
    seed = 1
    mixture, post = common.init(X, K, seed)
    mixture, post, ll = em.run(X, mixture, post)
    X_pred = em.fill_matrix(X, mixture)
    X_gold = np.loadtxt('netflix_complete.txt')
    print("RMSE:", common.rmse(X_gold, X_pred))
Example #23
def colormap(force=False, view=False):
    '''
    return cached file or create a new one
    '''
    logging.info('colormap() called, force: %s, view: %s', force, view)
    init()
    print('content-type: text/json\r\n\r\n', end='')
    if os.path.exists(MAPFILE) and not force:
        logging.info('returning cached color map')
        with open(MAPFILE) as infile:
            colormap = infile.read()
    else:
        logging.info('creating new color map')
        colormap = json.dumps(create_colormap(view))
        with open(MAPFILE, 'w') as outfile:
            outfile.write(colormap)
    print(colormap, end='')
Example #24
def main():
    common.parser().add_option("--indb", dest = "indb", help = "Input database")
    common.parser().add_option("--outdb", dest = "outdb", help = "Output database")
    common.parser().add_option("--firstgame", dest="firstgame", help = "First game in input database to analyze")
    common.parser().add_option("--lastgame", dest="lastgame",help = "First game in input database to analyze")
    common.parser().add_option("--ecofile", dest = "ecofile", help = "The (.cfdb) database containing the ECO classification")

    if not common.init(__file__):
        exit(2)

    progname = common.progname()
    debuglog = True
    logcomms = False
    logfile1 = common.tmpdir() + "/ccore1.log"
    logfile2 = common.tmpdir() + "/ccore2.log"
    configfile = common.configfile()
    indb = common.options().indb
    if not indb:
        indb = common.testdir() + "/pgn/Boris_Spassky.pgn"
    outdb = common.options().outdb
    if not outdb:
        outdb = common.tmpdir() + "/classified.cfdb"
        if os.path.exists(outdb):
            os.remove(outdb)
    ecofile = common.options().ecofile
    if not ecofile:
        ecofile = common.testdir() + "/cfdb/eco.cfdb"
    firstgame = common.options().firstgame
    lastgame = common.options().lastgame

    if os.path.exists(logfile1):
        os.remove(logfile1)
    if os.path.exists(logfile2):
        os.remove(logfile2)

    # Take a copy of the database
    cmdline = common.ccore()
    if debuglog:
        cmdline += " --debuglog true"
    if logcomms:
        cmdline += " --logcomms true"
    if firstgame:
        cmdline += " -n {0}".format(firstgame)
    if lastgame:
        cmdline += " -N {0}".format(lastgame)
    cmdline += " -l {0} -i {1} -o {2} copydb".format(logfile1, indb, outdb);
    if common.runccore(cmdline):
        common.checkLogfile(logfile1)

        # And then classify the copy
        cmdline = common.ccore()
        if debuglog:
            cmdline += " --debuglog true"
        if logcomms:
            cmdline += " --logcomms true"
        cmdline += " -l {0} -i {1} -E {2} classify".format(logfile2, outdb, ecofile)
        if common.runccore(cmdline):
            common.checkLogfile(logfile2)
def best_run_em(X):
    K = 12
    dict = {}
    for seed in range(5):
        np.random.seed(seed)
        mixture, post = common.init(X, K, seed)
        mixture, post, LL = em.run(X, mixture, post)
        dict[LL] = (mixture, seed)
    return dict[min(dict.keys())]
def best_run_em(X):
    K = 12
    dict = {}
    likelihood_ls = []
    for seed in range(5):
        mixture, post = common.init(X, K, seed)
        mixture, post, LL = em.run(X, mixture, post)
        dict[LL] = mixture
    return dict[min(dict.keys())]
Example #27
def test_k12():
    lls = []
    for s in [0, 1, 2, 3, 4]:
        print(s)
        init_mixture, post = common.init(X, 12, s)
        model = em.run(X, init_mixture, post)
        lls.append(model)
    m, p, l = max(lls, key=lambda x: x[-1])
    prediction = em.fill_matrix(X, m)
    return common.rmse(prediction, X_gold)
def select_best_bic(X):
    bic_ls = []
    for K in [1,2,3,4]:
        likelihood_ls = []
        bic_ls_seed = []

        for seed in range(5):
            mixture, post = common.init(X, K, seed)
            mixture, post, LL = naive_em.run(X,mixture,post)
            likelihood_ls.append(LL)
            bic_ls_seed.append(common.bic(X, mixture, LL))

        best_seed = np.argmax(bic_ls_seed)

        mixture, post = common.init(X, K, int(best_seed))
        mixture, post, LL = naive_em.run(X, mixture, post)
        bic_ls.append(common.bic(X,mixture,LL))
    print("The best K is {} with bic {}".format(np.argmax(bic_ls)+1, max(bic_ls)))
    return "Done"
Example #29
def test_incomplete_em():
    for k_s in [1, 12]:
        lps = []
        for s in [0, 1, 2, 3, 4]:
            print(k_s, s)
            init_mixture, post = common.init(X, k_s, s)
            model = em.run(X, init_mixture, post)
            lps.append(model)
        best = max(lps, key=lambda x: x[-1])
        print(best[-1])
Example #30
File: main.py Project: tritus/ml-courses
def test_em_seeds(X, K):
    print("\n############## EM K=" + str(K) + " ###############")

    mixture0, post0 = common.init(X, K, 0)
    mixture1, post1 = common.init(X, K, 1)
    mixture2, post2 = common.init(X, K, 2)
    mixture3, post3 = common.init(X, K, 3)
    mixture4, post4 = common.init(X, K, 4)

    cost0 = em.run(X, mixture0, post0)[2]
    cost1 = em.run(X, mixture1, post1)[2]
    cost2 = em.run(X, mixture2, post2)[2]
    cost3 = em.run(X, mixture3, post3)[2]
    cost4 = em.run(X, mixture4, post4)[2]

    print("K=" + str(K) + " seed=0 : likelihood=" + str(cost0))
    print("K=" + str(K) + " seed=1 : likelihood=" + str(cost1))
    print("K=" + str(K) + " seed=2 : likelihood=" + str(cost2))
    print("K=" + str(K) + " seed=3 : likelihood=" + str(cost3))
    print("K=" + str(K) + " seed=4 : likelihood=" + str(cost4))
def run_em(X):
    for K in [1, 12]:
        likelihood_ls = []
        for seed in range(5):
            mixture, post = common.init(X, K, seed)
            mixture, post, LL = em.run(X, mixture, post)
            likelihood_ls.append(LL)


        print("The likelihood of {} cluster is".format(K), max(likelihood_ls))
    return "Done"
Example #32
def train():
    args = init()
    global_epoch = args.init_epoch
    global_steps = 0
    gen_path = args.outpath

    if not isdir(gen_path):
        mkdir(gen_path)

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        train_writer = tf.summary.FileWriter(args.model_root, sess.graph)

        for e in range(args.init_epoch, args.epochs):
            global_epoch += 1
            for i, (data, y) in enumerate(
                    data_generator(args.data_root, args.label_path)):
                if y.size == 0:  # skip batches with no labels
                    continue

                data = data.reshape(batch, x_dim)

                _, _vae_loss, summary = sess.run([opt, vae_loss, merged],
                                                 feed_dict={
                                                     real_x:
                                                     data,
                                                     z:
                                                     np.random.randn(
                                                         batch, z_dim)
                                                 })
                train_writer.add_summary(summary, global_steps)

                if _vae_loss is None:
                    print('[{}/{}] vae_loss:{}'.format(global_epoch, i + 1,
                                                       _vae_loss),
                          flush=True)
                    return

                print('[{}/{}] vae_loss:{:.4}'.format(global_epoch, i + 1,
                                                      _vae_loss),
                      flush=True)

                global_steps += 1
                if i // gen_sample_nr != 0:
                    continue

                sample = sess.run(fake_x,
                                  feed_dict={z: np.random.randn(batch, z_dim)})

                ind = i % gen_sample_nr
                i // gen_sample_nr and np.save('{}/{}'.format(gen_path, ind),
                                               sample) or None
            print('model saved @ {}'.format(saver.save(sess, args.model_root)),
                  flush=True)
Example #33
def run_naive_em_with_bic():
    max_bic = None
    for K in range(1, 5):
        max_ll = None
        best_seed = None
        for seed in range(0, 5):
            mixture, post = common.init(X, K, seed)
            mixture, post, ll = naive_em.run(X, mixture, post)
            if max_ll is None or ll > max_ll:
                max_ll = ll
                best_seed = seed

        mixture, post = common.init(X, K, best_seed)
        mixture, post, ll = naive_em.run(X, mixture, post)
        bic = common.bic(X, mixture, ll)
        if max_bic is None or bic > max_bic:
            max_bic = bic
        title = "EM for K={}, seed={}, ll={}, bic={}".format(K, best_seed, ll, bic)
        print(title)
        common.plot(X, mixture, post, title)
Example #34
def test_naive_em():
    for k in [1, 2, 3, 4]:
        para_list = []
        for seed in [0, 1, 2, 3, 4]:
            gm, post = common.init(X, k, seed)
            mixture, p, cost = naive_em.run(X, gm, post)
            para_list.append((mixture, p, cost))
        max_para = max(para_list, key=lambda x: x[2])
        common.plot(X, max_para[0], max_para[1],
                    'EM on toy data with {k}'.format(k=k))
    return max_para[0], max_para[1]
Example #35
def ajax_completetag():
	tag = request.query.q
	client = init()
	full_tag, alts = complete(client, tag)
	res = {}
	if full_tag[0] or alts:
		res["complete"] = full_tag[0]
		res["type"] = full_tag[1]
		if len(alts) > 20: alts = []
		res["alts"] = alts
	return res
Example #36
def run_matrix_completion():
    K = 12
    seed = 1
    mixture, post = common.init(X, K, seed)
    (mu, var, p), post, ll = em.run(X, mixture, post)
    # print('Mu:\n' + str(mu))
    # print('Var: ' + str(var))
    # print('P: ' + str(p))
    # print('post:\n' + str(post))
    # print('LL: ' + str(ll))
    X_pred = em.fill_matrix(X, common.GaussianMixture(mu, var, p))
    X_gold = np.loadtxt('netflix_complete.txt')
    print("MAE:", common.mae(X_gold, X_pred))
Example #37
File: UI.py Project: andersardo/gedMerge
def listdubl():
    if not bottle.request.query.workDB:
        if 'workDB' in bottle.request.session:
            bottle.request.query.workDB = bottle.request.session['workDB']
        else:
            return 'Databas I ej vald - programmet avslutas<br><a href="/">Tillbaka till startsida</a>'
    bottle.request.query.matchDB = bottle.request.query.workDB
    bottle.request.session['matchDB'] = bottle.request.query.workDB
    #need to re-init database collections
    common.config = common.init(bottle.request.query.workDB,
                                matchDBName = bottle.request.query.matchDB)
    #test if workDB matched against itself
    dbOK = getDBselect('listDubl', bottle.request.query.workDB,
             bottle.request.session['activeUser'], bottle.request.session['directory'])
    if dbOK == 'No valid choices':
        #if not run match
        return runprog('match')
    #else show list
    from uiUtils import persDisp
    tit = 'Lista från alternativ dubblettkontroll'
    if bottle.request.params.sortNS:
        sortVal = 'checked'
        sorting = [('nodesim', -1),('sortDubl', -1)]
    else:
        sortVal = ''
        sorting = [('sortDubl', -1),('nodesim', -1)]
    page = int(bottle.request.params.pageNo or '1')
    prevnext = bottle.request.params.page or ''
    if prevnext == 'prev': page += -1
    elif prevnext == 'next': page += 1
    else: page = 1
    if page <= 0: page = 1
    rows = [['#',u'Namn/refId', u'Född', u'Död','Score/NodeSim', u'Namn/refId', u'Född', u'Död','Visa']]
    i = (page-1)*10
    args = {'where': 'visa', 'what': '/view/persons', 'buttons': 'No'}
#    for mt in common.config['matches'].find({'nodesim': {'$gt': 0.3}}).sort([('nodesim', -1),('sortDubl', -1)]).limit( 50 ):
    tot = common.config['matches'].find({'nodesim': {'$gt': 0.3}}).count()
    for mt in common.config['matches'].find({'nodesim': {'$gt': 0.3}}).sort(sorting).skip((page-1)*10).limit( 10 ):
        #print mt['pwork']['name'],mt['pmatch']['name'], mt['sortDubl'], mt['nodesim']
        i += 1
        row = [str(i)]
        row.extend(persDisp(mt['pwork']))
        row.append(str(mt['sortDubl'])+'<br>'+str(mt['nodesim']))
        row.extend(persDisp(mt['pmatch']))
        args['wid'] = str(mt['workid'])
        args['mid'] = str(mt['matchid'])
        row.append('<button onclick="doAction('+str(args)+')">Visa</button>')
        rows.append(row)
    return bottle.template('dubl', title = tit, page = page, tot = tot, 
                           prow=rows, sort=sortVal)
Example #38
def ajax_tag():
	client = init()
	tags = request.forms.tags
	name = request.forms.name
	if name:
		try:
			type = request.forms.type
			client.add_tag(tag_clean(name), type)
			tags = name
		except Exception:
			msg = u'Failed to create'
	m = request.forms.m.split()
	full = set()
	weak = set()
	remove = set()
	failed = []
	for t in tags.split():
		tag = client.find_tag(tag_clean(t))
		if tag:
			p = tag_prefix(t)
			if p == "~":
				weak.add(tag)
			elif p == "-":
				remove.add(tag)
			else:
				full.add(tag)
		else:
			failed.append(t)
	res = {}
	msg = u''
	if full or weak or remove:
		client.begin_transaction()
		for p in map(client.get_post, m):
			if not p:
				msg = u'Posts missing?'
			elif tag_post(p, full, weak, remove):
				p = client.get_post(p.md5)
				res[p.md5] = tags_as_html(p)
		client.end_transaction()
	if not res and not msg and not failed and not (name and name[0] == '-'):
		msg = u'Nothing to do?'
	
	res = dict(failed=u' '.join(failed), m=res, msg=msg)
	if failed:
		res["types"] = tagtypes()
	return res
Example #39
def r_search():
	data = globaldata()
	client = init()
	def parse_tag(name):
		res = client.parse_tag(name, comparison=True)
		if res:
			guid, cmp, val = res
			tag = client.get_tag(guid, with_prefix=True)
			return (tag, cmp, val)
	try:
		page = max(0, int(request.query.page))
	except Exception:
		page = 0
	q = request.query.q.strip()
	data.tagnames = qa = q.split()
	data.tags = map(parse_tag, qa)
	data.q = q = u' '.join(qa)
	data.cloud = []
	data.result_count = 0
	ta = []
	for i, (tag, cmp, val) in enumerate(filter(None, data.tags)):
		if cmp:
			qa[i] = tag_prefix(qa[i]) + tag.name
		ta.append((tag, cmp, val))
	if ta or not q:
		if data.user and request.query.ALL:
			range = [0, 1 << 31 - 1]
			page = -1
		else:
			range = [per_page * page, per_page * page + per_page - 1]
		order = "aaaaaa-aaaac8-faketg-bddate"
		if ta and ta[0][0].ordered:
			order = "group"
		props = DotDict()
		ga = [(t.pguid, cmp, val) for t, cmp, val in ta]
		posts = client.search_post(guids=ga, order=order, range=range, wanted=wanted, props=props)
		print "mm"
		if posts:
			data.posts = posts
			data.result_count = props.result_count
			data.page = page
			data.pagelink = makelink(u'search', (u'q', q))
			data.pages, data.rels = pagelinks(data.pagelink, page, data.result_count)
			data.cloud = tagcloud(ga)
	return data
Example #40
def r_post_rotate():
	m = request.forms.post
	rot = int(request.forms.rot or 0)
	assert rot in (0, 90, 180, 270)
	if rot:
		client = init()
		post = client.get_post(m, wanted=["rotate", "ext", "width", "height"])
		props = DotDict()
		if rot in (90, 270):
			props.width, props.height = post.height, post.width
		prot = int(post.rotate)
		if prot == -1: prot = 0
		assert prot in (0, 90, 180, 270)
		rot = (prot + rot) % 360
		client.save_thumbs(m, None, post.ext, rot, True)
		props.rotate = rot
		client.modify_post(m, **props)
	redirect("post/" + m)
Example #41
def main():
    common.parser().add_option("--indb", dest = "indb", help = "Input database")
    common.parser().add_option("--outdb", dest = "outdb", help = "Output database")
    
    if not common.init(__file__):
        exit(2)

    progname = common.progname()
    debuglog = True
    logfile1 = common.tmpdir() + "/ccore1.log"
    logfile2 = common.tmpdir() + "/ccore2.log"
    indb = common.options().indb
    if not indb:
        indb = common.rootdir() + "/doc/OpeningClassification/eco.pgn"
    outdb = common.options().outdb
    if not outdb:
        outdb = common.testdir() + "/cfdb/eco.cfdb"

    if os.path.exists(logfile1):
        os.remove(logfile1)
    if os.path.exists(logfile2):
        os.remove(logfile2)
    if os.path.exists(outdb):
        os.remove(outdb)

    # Copy the database
    cmdline = common.ccore()
    if debuglog:
        cmdline += " --debuglog true"

    cmdline += " -l {0} -i {1} -o {2} copydb".format(logfile1, indb, outdb)
    if common.runccore(cmdline):
        common.checkLogfile(logfile1)

        # Create the opening tree in the output database
        cmdline = common.ccore()
        if debuglog:
            cmdline += " --debuglog true"
        cmdline += " -l {0} -i {1} -d 100 buildoptree".format(logfile2, outdb)
        if common.runccore(cmdline):
            common.checkLogfile(logfile2)
Example #42
def main():
    common.parser().add_option("--engine1", dest = "engine1", help = "Engine #1")
    common.parser().add_option("--engine2", dest = "engine2", help = "Engine #2")
    common.parser().add_option("--numgames", dest = "numgames", type = "int", default = 5, help = "Number of games")
    common.parser().add_option("--timecontrol", dest = "timecontrol", default = "30", help = "Time control")
    common.parser().add_option("--ecofile", dest = "ecofile", help = "The (.cfdb) database containing the ECO classification")
    common.parser().add_option("--logcomms", dest = "logcomms", type = "int", default = 0, help = "Log UCI comms")

    if not common.init(__file__):
        exit(2)

    progname = common.progname()
    debuglog = True
    logcomms = common.options().logcomms != 0
    logfile = common.tmpdir() + "/ccore.log"
    pgnfile = common.tmpdir() + "/games.pgn"
    configfile = common.configfile()
    ecofile = common.options().ecofile
    if not ecofile:
        ecofile = common.testdir() + "/cfdb/eco.cfdb"
    engine1 = common.options().engine1
    if not engine1:
        engine1 = common.engine1()
    engine2 = common.options().engine2
    if not engine2:
        engine2 = common.engine2()
    numgames = common.options().numgames
    timecontrol = common.options().timecontrol

    if os.path.exists(logfile):
        os.remove(logfile)

    cmdline = common.ccore()
    if debuglog:
        cmdline += " --debuglog true"
    if logcomms:
        cmdline += " --logcomms true"
    cmdline += " -c {0} -l {1} -o {2} -E {3} -n {4} -t {5} tournament {6} {7}".format(configfile, logfile, pgnfile, ecofile, numgames, timecontrol, engine1, engine2)
    if common.runccore(cmdline):
        common.checkLogfile(logfile)
Example #43
def r_post(m):
	client = init()
	post = client.get_post(m, wanted=wanted + ("width", "height", "imgdate", "ext", "rotate",), separate_implied=True)
	if not post: abort(404)
	data = globaldata()
	
	data.post = post
	data.q = ""
	data.extra_script = u"resize.js"
	data.tags = sorted(taglist(post, False) + taglist(post, True))
	data.rel_posts = [Post(md5=md5) for md5 in client.post_rels(m) or []]
	data.rels = []
	
	if post.rotate > 0:
		spec = u'%(width)dx%(height)d-%(rotate)d' % post
		data.svg = data.base + u'rotate/' + spec + u'/' + m + u'.' + post.ext
	
	data.ordered_tags = [t for t in post.tags if t.ordered]
	if data.ordered_tags:
		do_rel = (len(data.ordered_tags) == 1)
		for t in data.ordered_tags:
			posts = client.search_post(guids=[t.guid], order="group")
			pos = [p.md5 for p in posts].index(m)
			odata = [(u'dist2', None), (u'dist1', u'prev'), (u'dist0', None),
			         (u'dist1', u'next'), (u'dist2', None)]
			start, end = pos - 2, pos + 3
			if start < 0:
				odata = odata[-start:]
				start = 0
			t.relposts = posts[start:end]
			for p, d in zip(t.relposts, odata):
				p.reldist = d[0]
				if do_rel and d[1]:
					data.rels.append((d[1], p.md5))
	
	if 'aaaaaa-aaaadt-faketg-gpspos' in post.datatags:
		data.gps = post.datatags['aaaaaa-aaaadt-faketg-gpspos'].value
	
	return data
Example #44
def r_post_tag():
	client = init()
	m = request.forms.post
	post = client.get_post(m)
	tags = request.forms.tags
	create = [a.decode("utf-8") for a in request.forms.getall("create")]
	ctype  = [a.decode("utf-8") for a in request.forms.getall("ctype")]
	full = set()
	weak = set()
	remove = set()
	failed = []
	
	for n, t in zip(create, ctype):
		if t:
			client.add_tag(tag_clean(n), t)
			tags += u' ' + n
	for t in tags.split():
		tag = client.find_tag(tag_clean(t))
		if tag:
			p = tag_prefix(t)
			if p == "~":
				weak.add(tag)
			elif p == "-":
				remove.add(tag)
			else:
				full.add(tag)
		else:
			failed.append(t)
	
	tag_post(post, full, weak, remove)
	
	if not failed:
		redirect("post/" + m)
	
	data = globaldata()
	data.tagtypes = tagtypes()
	data.failed = failed
	data.m = m
	return data
Example #45
def r_image(m, ext):
	client = init()
	return serve(client.image_path(m), ext)
Example #46
dbName  = os.path.basename(workDB).split('.')[0]  #No '.' or '/' in databasenames
mDBname = os.path.basename(matchDB).split('.')[0]

#CHECK imports
from matchUtils import *
from dbUtils import getFamilyFromId
from utils import matchFam, setFamOK, setEjOKfamily, setOKperson
from matchtext import matchtext
from luceneUtils import setupDir, search

mt_tmp = matchtext()

t0 = time.time()
logging.info('using db %s matching against %s', dbName, mDBname)
config = common.init(dbName, matchDBName=mDBname, indexes=True)
if featureSet:
    config['featureSet'] = featureSet
if famfeatureSet:
    config['famfeatureSet'] = famfeatureSet

common.config = config
setupDir(mDBname)

person_list = config['persons']
fam_list = config['families']

matches = config['matches']
matches.drop()
fam_matches = config['fam_matches']
fam_matches.drop()
Example #47
# -*- coding: utf-8 -*-
"""
Created on Sat May 18 20:45:29 2013

@author: ted
"""

import common as cm

cm.init(centroids=[4,8,32,32,64,32,4],
        video_file="color_4.avi", #moving_square.avi
        learn_rate=0.05)
                 
cm.video_source.enableDisplayWindow()
cm.network.setIsPOSTraining(True)

def callback(iter):
    cm.printStats()
    #cm.network.printBeliefGraph(cm.top_layer, 0, 0)
    print "iter:",iter
    cm.printFPS()

def showTree(index):
    if index >= tm.getMinedTreeCount():
        print "out of bounds"
        return
    tm.displayMinedTree(index)
    tm.printMinedTreeStructure(index)
    cm.wk()
    
def doTracking(delay=0):
Example #48
def thumb(m, z):
	client = init()
	if z in ("normal", "large"):
		return serve(client.pngthumb_path(m, z), "png")
	else:
		return serve(client.thumb_path(m, z), "jpeg")
Example #49
def ui_init():
    common.init()
Example #50
File: docs.py Project: iiiiiwan/python
# -*- coding: utf-8 -*-

# Import
import common
common.init(['/klass'])

if __name__ == '__main__':

  import Template
  import _Selenium
  import CustomSelenium

  # Docs - common.py
  print common.__doc__
  print '  ' + common.log.__doc__
  print '  ' + common.init.__doc__

  print '\n'

  # Docs - klass/Template.py
  print Template.__doc__
  print '  ' + Template.TemplateKlass.__doc__
  print '  ' + Template.TemplateKlass.getName.__doc__
  print '  ' + Template.TemplateKlass.setName.__doc__

  print '\n'

  # Docs - klass/_Selenium.py
  print _Selenium.__doc__
  print '  ' + _Selenium.SeleniumKlass.__doc__
  print '  ' + _Selenium.SeleniumKlass.getScreenShot.__doc__
Example #51
    gvFil.write( "}\n" )
    gvFil.close()
    os.system('dot -Tsvg -O '+filnamn)
    #print '  Img', filnamn
    fil = open(filnamn+'.svg' , 'rb')
    graph = fil.read()
    fil.close()
    return graph

if __name__=="__main__":
    import codecs, locale
    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') #sorting??
    sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
    import common
    DB = 'anders_DavidEkedahlMaster'
    conf = common.init(DB, matchDBName = DB, indexes=True)
    personDB = conf['persons']
    familyDB = conf['families']
    relationDB = conf['relations']
    origDB = conf['originalData']
    (childErr, famErr, relErr) = sanity(personDB, familyDB, relationDB)
    print 'Only child in one family', len(childErr)
    notFixed = repairChild(childErr, personDB, familyDB, relationDB, origDB)
    print 'Not fixed', len(notFixed)

    print 'Multi husb/wife in one family'
    repairFam(famErr, personDB, familyDB, relationDB, origDB)

    print 'Rel err'
    repairRel(relErr, personDB, familyDB, relationDB, origDB)
Example #52
#!/usr/bin/env python

from twisted.internet import reactor


# Setup twisted logging
from twisted.python import log
import sys
log.startLogging(sys.stdout)


# Start up the common part
import common
common.init(reactor)


# CherryPy-in-Twisted setup
import cherrypy
cherrypy.config.update({
   'environment': 'embedded',
   'log.screen': True
})
# We need to unsubscribe the CherryPy server to prevent a port conflict
cherrypy.server.unsubscribe()
# Start CherryPy internals
cherrypy.engine.start()
# Make sure we shut down CherryPy
reactor.addSystemEventTrigger('after', 'shutdown', cherrypy.engine.exit)


# The RFB proxy
Example #53
    sys.exit()

#use username and first part of filename as databasename
dbName = user + '_' + os.path.basename(fn).split('.')[0]
logging.info('Using database %s importing from file %s', dbName, fn)

#Read mappings
import json
(fndir,tmp) = os.path.split(fn)
namMap = json.load(open(fndir + '/name.dat'))
placMap = json.load(open(fndir + '/plac.dat'))
datMap = json.load(open(fndir + '/date.dat'))
sourMap = json.load(open(fndir + '/sour.dat'))
##

config = common.init(dbName, dropWorkDB=True, indexes=True)
persons = config['persons']
families = config['families']

t0 = time.time()
logging.info('Reading and parsing gedcom')

try:
    people = Gedcom(fn)
except Exception, e:
    logging.error('<h1>Fatalt fel vid import av Gedcom</h1>')
    exc_type, exc_value, exc_traceback = sys.exc_info()
    traceback.print_exception(exc_type, exc_value, exc_traceback)
    sys.exit()
logging.info('Time %s',time.time() - t0)
Example #54
    if args.json:
        try:
            result, info = do_query()
            print json.dumps({ 'status' : 'ok', 'result' : result })
        except pymoira.BaseError as e:
            print json.dumps({ 'status' : 'error', 'message' : str(e) })
    else:
        try:
            # Those are not real queries, and QueryInfo() would fail for them
            if args.query == '_help':
                show_help()
                return
            if args.query == '_list_queries':
                show_queries_list()
                return
            if args.query == '_list_users':
                show_user_list()
                return

            result, info = do_query()
            for row in result:
                fields = [ (field_name, row[field_name]) for field_name in info.outputs ]
                common.show_fields(*fields)
                print ""
        except pymoira.BaseError as err:
           common.error(err)

if __name__ == '__main__':
    client, args = common.init('mrquery', 'Send raw queries to Moira', setup_arguments)
    handle_query()
Example #55
File: mruser.py Project: vasilvv/mrtools
        ('Alternate email', user.alternate_email) if user.alternate_email else None,
        ('Alternate phone', user.alternate_phone) if user.alternate_phone else None,
        ('Created', "%s by %s" % (common.last_modified_date(user.created_date), user.created_by)),
        ('Last modified', "%s by %s using %s" % (common.last_modified_date(user.lastmod_datetime), user.lastmod_by, user.lastmod_with)),
    )

def show_ownerships():
    """Handle 'mruser ownerships'."""

    user = User(client, args.user)
    ownership.show_ownerships(client, args, user)

def setup_subcommands(argparser):
    """Sets up all the subcommands."""

    subparsers = argparser.add_subparsers()

    parser_info = subparsers.add_parser('info', help = 'Provide the information about the user')
    parser_info.add_argument('user', help = 'The user to inspect')

    parser_ownerships = subparsers.add_parser('ownerships', help = 'Show items which this user owns')
    parser_ownerships.add_argument('user', help = 'The name of the user to show information about')
    parser_ownerships.add_argument('-r', '--recursive', action = 'store_true', help = 'Show items which this user own through being in lists')
  
    parser_info.set_defaults(handler = show_info)
    parser_ownerships.set_defaults(handler = show_ownerships)

if __name__ == '__main__':
    client, args = common.init('mruser', 'Inspect Moira users', setup_subcommands)
    common.main()
Example #56
	root = Tk()

	common.error("pygtk is not available. Please install pygtk on your system.")
	
	SimpleDialog(root,
             i18n.get_text(common.get_user_lang(), 103) + "\n" + i18n.get_text(common.get_user_lang(), 104),
             ["OK"],
             0,
             title="Omnitux").go()

	exit()


common.info("Starting Omnitux")

globalvars = common.init()

common.info("Omnitux release = "+globalvars.release)

common.info("Version = "+constants.version)

common.info("Showing screen")
pygame.display.flip()


# activities and other stuff are imported here and not at the beginning

# the idea is to show a black screen as soon as possible so that
# the user won't click again on the omnitux icon
# (especially for slow systems)
Example #57
# -*- coding: utf-8 -*-
# This Python file uses the following encoding: utf-8
"""
export a database as Gedcom
"""

import argparse, sys, os, datetime, re
from collections import defaultdict
import pickle
parser = argparse.ArgumentParser()
parser.add_argument("workDB", help="Working database name" )
args = parser.parse_args()
workDB = args.workDB
dbName  = os.path.basename(workDB).split('.')[0]  #No '.' or '/' in databasenames
import common
config = common.init(dbName, indexes=True)
from common import RGDadm 
import codecs, locale
locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') #sorting??
sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)

import json
from workFlow import workFlowUI
from dbUtils import getFamilyFromId
user = workDB.split('_')[0]
(files, dbs, workingDir, activeUser) = workFlowUI(user, None)

sourMap = {}
cIdMap = {}
for rec in config['originalData'].find({'type': 'admin'}):
    if 'cId' in rec:
Example #58
"""
Created on Sat May 18 20:45:29 2013

@author: ted
"""

import common as cm
import pydestin as pd

# load the layer_widths and centroids from the
# object_tracking_config.py
from object_tracking_config import *

isTraining = True  # train from scratch, or reload from previous run

cm.init(centroids=centroids, video_file="moving_square.avi", learn_rate=0.05, layer_widths=layer_widths, img_width=256)
#

cm.video_source.enableDisplayWindow()
cm.network.setIsPOSTraining(True)

write_video = True

video_writer = pd.VideoWriter("obtracking_output.avi", 15)


callback_iters = 0


def callback(iter):
    # cm.network.printBeliefGraph(cm.top_layer, 0, 0)