Example #1
def reset(ctx):
    ctx.status("loading deployment config")
    fp = os.path.join(_DEPLOYMENT_CONFIGS_DIR, args.deployment_config)
    ctx.deployment_config = deployment_config.DeploymentConfig(
        fp, get_provider_profiles())
    ctx.status_ok()
    load.load(ctx)
Example #2
File: main.py Project: Velnbur/labs
def process(path_to_ini: str):
    """ Program main function """
    input_paths, output_path = load_ini(path_to_ini)
    records = Records()
    load(records, input_paths["csv"], input_paths["json"],
         input_paths["encoding"])
    records.output(output_path["fname"], output_path["encoding"])
Example #3
    def test_load(self):
        filename = "non-existent-file.txt"
        # test a non-existent file
        self.assertEqual(load(filename), "No save file exists.")
        # see if a save file exists at all

        filename = "save.txt"
        self.assertTrue(os.path.exists(filename))

        for trial in range(6):
            if trial != 0:
                save_file = open("save.txt", "w+")
                save_file.write(str(trial))
                save_file.close()

                print("test is: " + str(trial))
                self.assertEqual(load("save.txt"), "success")

        bad_states = [-1, 50, 79, "egg"]
        for trial in range(len(bad_states)):
            save_file = open("save.txt", "w+")
            save_file.write(str(bad_states[trial]))
            save_file.close()

            print("test is: " + str(bad_states[trial]))
            self.assertEqual(
                load("save.txt"),
                "Save file is corrupted. Please start a new game.")
Example #4
 def load(self, path:str):
     '''
     loading of the dumped data
     '''
     if not path:
         raise Exception('path is not defined')
     load(path)
Example #5
        def init():
            # initialize parameters
            self.clear()
            self.downloaded = True
            self.i = 0
            self.len = 0

            load.load()
            self.ISBN = load.get_ISBN()
            self.bookname = load.get_booname()
            self._class = load.get_class()
            self.publish = load.get_publish()
            self.original_price = load.get_original_price()
            self.dd_price = load.get_dd_price()
            self.date = load.get_date()
            self.goodrate = load.get_goodrate()
            self.img = load.get_img()
            self.author = load.get_author()
            self.authorInfo = load.get_authorInfo()
            self.customer = load.get_customer()
            self.comment = load.get_comments()
            self.content = load.get_content()
            self.len = len(self.ISBN)
            #print(len(self.ISBN))
            if self.downloaded:
                for i in range(self.len):
                    image_download.save(self.img[i], i)
                self.downloaded = False
            change()
Example #6
 def __init__(self, ori_data, loading, **kwarg):
     """
     """
     self.ori_data = load(ori_data)
     if not loading:
         self.shap_data = kwarg.get('shap_data')
     else:
         self.shap_data = load(kwarg.get('shap_data'))
Example #7
def main(ctx):
    if ctx.args.reset:
        ctx.status("loading deployment config")
        fp = os.path.join(_DEPLOYMENT_CONFIGS_DIR, args.deployment_config)
        ctx.deployment_config = deployment_config.DeploymentConfig(fp)
        ctx.status_ok()
        load.load(ctx)
    find_claims(ctx)
Example #8
def do_etl(echo=False, inital_load=False):
    from load import load
    from extraction import extraction
    from transformation import transformation

    run_id = extraction(echo=echo)

    transformation(echo=echo, inital_load=inital_load, run_id=run_id)

    load(echo=echo, run_id=run_id)
Example #9
 def __init__(self, data, read_file, mode):
     if mode == 'Compute':
         self.data = pd.DataFrame(data)
     else:
         self.data = load(data)
     data = self.data.iloc[:, :-2]
     data = data.applymap(lambda x: abs(x))
     shap_mean = data.apply(lambda x: x.mean())
     self.shap_mean = shap_mean.sort_values(ascending=True)
     ori = load(read_file)
     self.ori = ori.reindex(self.data.ID.values)
Example #10
File: main.py Project: dylan007/ml
def loadAll():
	files = ['data_batch_1','data_batch_2','data_batch_3','data_batch_4','data_batch_5']
	path = '/home/dylan/Downloads/cifar-10-batches-py/'
	Xtr = load.load(path + 'data_batch_1')[0]
	Ytr = load.load(path + 'data_batch_1')[1]
	for i in range(1,len(files)):
		p = path + files[i]
		resx,resy = load.load(p)
		print Xtr.shape, resx.shape
		Xtr = np.concatenate((Xtr,resx))
		Ytr = np.concatenate((Ytr,resy))
	res = load.load(path + 'test_batch')
	return Xtr,Ytr,res[0],res[1]
Example #11
 def change_background(self):
     if self.scenery == "outside":
         self.game.fill(BLACK)
         self.sound.play()
         req_egg_text.__init__(295, 100,
                               ("Required eggs: %s" % chicken.req_egg),
                               WHITE, "helvetica", 32)
         req_egg_text.draw(gameCanvas.game)
         nightText.draw(gameCanvas.game)
         nightGame.__init__(300, 30, 250, 450, foodBarFront.width)
         pygame.display.update()
         pygame.time.wait(2000)
         self.file = pygame.image.load(
             'graphics//background2.png').convert()
         self.file2 = pygame.Surface([self.width, self.height],
                                     pygame.SRCALPHA, 32)
         timeBarFront.width += 75
         self.scenery = "inside"
     if timeBarFront.width <= 1:
         if self.scenery == "inside":
             self.game.fill(BLACK)
             self.sound.play()
             chicken.egg_highscore += chicken.egg
             if chicken.egg < chicken.req_egg:
                 if chicken.egg_highscore > load.load():
                     load.save(chicken.egg_highscore)
                 scoreText.__init__(300, 120,
                                    ("Score: %s" % chicken.egg_highscore),
                                    WHITE, "helvetica", 45)
                 scoreText.draw(gameCanvas.game)
                 highscoreText = text.Text(290, 400,
                                           ("Highscore: %s" % load.load()),
                                           WHITE, "helvetica", 45)
                 highscoreText.draw(gameCanvas.game)
                 gameOverAnim()
             else:
                 dayText.draw(gameCanvas.game)
                 chicken.egg = 0
                 chicken.req_egg += 2
                 pygame.display.update()
                 pygame.time.wait(2000)
                 self.file = pygame.image.load(
                     'graphics//background.png').convert()
                 self.file2 = pygame.image.load(
                     'graphics//hegn.png').convert_alpha()
                 timeBarFront.next_level()
                 foodBarFront.next_level()
                 timeBarFront.reset()
                 foodBarFront.reset()
                 self.scenery = "outside"
Example #12
def mainLoop():
    global gameExit
    highscoreText.__init__(300, 400, ("Highscore: %s" % load.load()),
                           (75, 180, 50), "helvetica", 30)
    clock = pygame.time.Clock()
    while not gameExit:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                gameExit = True
                pygame.quit()
        menuCanvas.draw()

        playButton.draw()
        playText.draw(menuCanvas.canvas)

        optionsButton.draw()
        optionsText.draw(menuCanvas.canvas)

        exitButton.draw()
        exitText.draw(menuCanvas.canvas)

        maxwellText.draw(menuCanvas.canvas)
        highscoreText.draw(menuCanvas.canvas)
        linctusText.draw(menuCanvas.canvas)
        soundcloudText.draw(menuCanvas.canvas)

        if menuCanvas.imageON:
            menuCanvas.canvas.blit(optionsButton.image, menuCanvas.rect)
            optionsCloseButton.draw()
            optionsCloseText.draw(menuCanvas.canvas)

        pygame.display.update()
        clock.tick(60)
Example #13
def dectree():
    inputs, targets = load("../proc_data/data", equalize=False, shuffle=True)
    train_split = 0.8

    #Split between train and test
    length = len(inputs)
    split = int(length * train_split)
    inputs_train = inputs[:split]
    targets_train = targets[:split]
    inputs_test = inputs[split:]
    targets_test = targets[split:]

    print length, "total inputs"
    print len(inputs_train), "used for training"
    print len(inputs_test), "used for testing"

    start = time.time()
    clf = tree.DecisionTreeClassifier(criterion="entropy", max_depth=6, random_state=42)
    print "Training Tree..."
    clf.fit(inputs_train, targets_train)
    print "Training time:", time.time() - start

    print "Predicting tests..."
    start = time.time()
    targets_predicted = clf.predict(inputs_test)
    print "Classification time:", time.time() - start
    print metrics.classification_report(targets_test, targets_predicted)

    print "Saving classifier to disk..."
    with open("../classifiers/tree", "w") as clffile:
        pickle.dump(clf, clffile)
Example #14
def exitsavwar():
    try:
        filename
    except NameError:
        exit()
    else:
        try:
            stufftemp = {}
            worldtemp = []
            stufftemp, worldtemp = load(filename, stufftemp, worldtemp)

            if stufftemp != stuff or worldtemp != world:
                filewin = Toplevel(main)

                label = Label(
                    filewin,
                    text=
                    "You have unsaved data, are you sure you want to continue?"
                )
                label.grid(row=0, columnspan=30, column=0)

                dont = Button(filewin, text="yes", command=lambda: exit())
                dont.grid(column=14, row=1)

                yeah = Button(filewin,
                              text="no",
                              command=lambda: filewin.destroy())
                yeah.grid(column=15, row=1)
            else:
                exit()
        except:
            exit()
Example #15
 def loads(self):
     """
     Insert the plugin objects into the thread pool's task queue.
     """
     tempRes = load.load()
     for mod in tempRes:
         self.__tp.addTask(tempRes[mod])
Example #16
 def test_load3(self):
     expected = {'you': {'say': {'test1': {'foo': 'bar'}}}}
     actual = load(os.path.join(DATA_PATH, 'test3'))
     logging.info('Expected: %s', str(expected))
     logging.info('Actual: %s', str(actual))
     self.assertEqual(expected, actual,
                      'Directory with Directory with file')
Example #17
def button():
    global dataset
    print(dataset)
    loaded = load(dataset)
    return flask.render_template("query_result.html",
                                 loaded=loaded,
                                 loading=True)
Example #18
def SHAPit():
    if var_read.get():
        df = load(var_read.get())
        label = list(lb_label.get(0, tk.END))
        feature = list(lb_feature.get(0, tk.END))
        feature.extend(label)
        data = df.loc[:, feature].copy()
        data[label[0]] = data[label[0]].map(lambda x: str(x))
        l = Loop(data,
                 label[0],
                 loop=var_loop.get(),
                 n_splits=var_splits.get(),
                 sample_mode=cmb.get(),
                 major_label=var_major.get(),
                 minor_label=var_minor.get())
        out = l.Loop()
        var_data.set(out.to_dict())
        if messagebox.askyesno('Finished!',
                               'Do you want to save the result?'):
            savefile = asksaveasfilename(filetypes=(("Excel file",
                                                     "*.xlsx*;*.xls*"),
                                                    ("csv file", "*.csv*"),
                                                    ("Text file", "*.txt*")))
            out.to_csv(savefile, index=False)
Example #19
 def Load(self):
     data = load(self.loadfile)
     self.df = data
     #        self.df = abs(self.df)
     self.length = len(self.df)
     self.hit_all = len(self.df[self.df[self.label_col] == 1])
     self.scorers = pd.Series(self.score_col)
Example #20
def pytest_generate_tests(metafunc):
  args = []
  for f in test_files:
    rt = build_rt(os.path.basename(f))
    for form in load(file(f)):
      args.append((rt, form))
  metafunc.parametrize(metafunc.funcargnames, args)
Example #21
def test(n=10, use_fit_transform=True):
    '''
    Test BOW() object for n rows of the training dataset. 

    inputs:
        - n (int): number of rows of the training dataset to test BOW formulation on
        - use_fit_transform (bool): if True, test is done using the BOW.fit_transform() method, else use BOW.fit() then BOW.transform() separately
    '''

    import load

    X_train, y_train, X_test, y_test = load.load(True, False, False, False)
    bow = BOW()
    if not use_fit_transform:
        bow.fit(X_train, new_pickle=False)
        output = bow.transform(X_train[0:n])
        _using = 'fit() THEN transform()'
    else:
        output = bow.fit_transform(X_train[0:n], new_pickle=False)
        _using = 'fit_transform()'
    print('Using {0}...\nWords found in each header/article\n'.format(_using),
          np.sum(output, axis=1))
    output = bow.fit_transform(X_train[0:n],
                               new_pickle=False,
                               separate_vectors=True)
    print('Words in each header, then words in each article\n{0}\n{1}'.format(
        np.sum(output[0], axis=1), np.sum(output[1], axis=1)))
Example #22
def supvec():
    inputs, targets = load("../proc_data/data", equalize=False)
    train_split = 0.8

    #Split between train and test
    length = len(inputs)
    split = int(length * train_split)
    inputs_train = inputs[:split]
    targets_train = targets[:split]
    inputs_test = inputs[split:]
    targets_test = targets[split:]

    print length, "total inputs"
    print len(inputs_train), "used for training"
    print len(inputs_test), "used for testing"

    start = time.time()
    clf = svm.SVC(kernel="linear", verbose=False)
    print "Training SVM..."
    clf.fit(inputs_train, targets_train)
    print "Training time:", time.time() - start

    print "Predicting tests..."
    start = time.time()
    targets_predicted = clf.predict(inputs_test)
    print "Classification time:", time.time() - start
    print metrics.classification_report(targets_test, targets_predicted)

    print "Saving classifier to disk..."
    with open("../classifiers/svm", "w") as clffile:
        pickle.dump(clf, clffile)
Example #23
def draw_pr(file, label_col, Ascore_col, Dscore_col, figsize=(5, 5)):
    data = load(file)
    score_col = Ascore_col + Dscore_col
    label = data.pop(label_col)
    font_kws = {'family': 'arial', 'size': 18}
    f, ax = plt.subplots(figsize=figsize)
    for col in score_col:
        score = data.loc[:, col]
        if col in Ascore_col:
            score = 1 - score
        else:
            pass
        precision, recall, _ = precision_recall_curve(label,
                                                      score,
                                                      pos_label=1)
        AUC = auc(recall, precision)
        ax.plot(recall,
                precision,
                linewidth=1.5,
                label=col + ' (auc=%.3f)' % AUC)
    ax.set_title('Precision/Recall Curve')  # give plot a title
    ax.set_xlabel('Recall', fontdict=font_kws)  # make axis labels
    ax.set_ylabel('Precision', fontdict=font_kws)
    ax.tick_params(direction='in', which='both', labelsize=12)
    ax.set_ylim([0.0, 1.0])
    ax.set_xlim([0.0, 1.0])
    ax.legend(fontsize=6.5)
    plt.show()
    return f
Example #24
def drawpr(folder_path='.',
           label_field='Label',
           score_field='average',
           figsize=(5, 5),
           ascending=False,
           savedir=None):
    files = os.listdir(folder_path)
    font_kws = {'family': 'arial', 'size': 18}
    f, ax = plt.subplots(figsize=figsize)

    if ascending:
        pos_label = 0
    else:
        pos_label = 1

    i = 0
    rounds = len(files)
    length = 10

    for file in files:
        target = re.findall(r'\d\d_(.*?)_\w', file)[0]
        data = load(os.path.join(folder_path, file))

        label = data[label_field].values
        score = data[score_field].values

        precision, recall, _ = precision_recall_curve(label,
                                                      score,
                                                      pos_label=pos_label)
        area = auc(recall, precision)

        ax.plot(recall,
                precision,
                linewidth=1.5,
                label='{} (AUC={})'.format(target, '%.3f' % area))
        i += 1
        ratio = i / rounds
        num = int(length * ratio)
        p = ''.join([
            '|', '>' * num, '*' * (length - num), '|', ' ',
            '{}%'.format(round(ratio * 100, 1))
        ])
        print(p, end='\r')

    ax.set_xlabel('Recall', fontdict=font_kws)  # make axis labels
    ax.set_ylabel('Precision', fontdict=font_kws)
    ax.set_ylim([0.0, 1.0])
    ax.set_xlim([0.0, 1.0])
    ax.spines['bottom'].set_linewidth(1.3)
    ax.spines['left'].set_linewidth(1.3)
    ax.spines['top'].set_linewidth(1.3)
    ax.spines['right'].set_linewidth(1.3)
    ax.tick_params(direction='in', which='both', labelsize=12)
    ax.legend(fontsize=6.5)

    #    ax.plot([0, 1], [0, 1], '--', color=(0.6, 0.6, 0.6), label='Random')
    if savedir:
        plt.savefig(savedir)
    plt.show()
    return f
Example #25
def main():
    try:
        env = {}
        for lib in ['stdlib.lisp', 'bq.lisp']:
            with open(lib) as f:
                try:
                    env.update(load(f, lib, env))
                except (LoadError, LoadWarning) as e:
                    print_exception(e)
                    exit(2)

        while True:
            try:
                expr = raw_input('* ')
            except (KeyboardInterrupt, EOFError):
                print
                break
            if not expr:
                continue
            try:
                form = Reader(expr, '<string>').read()
                result = eval_form(form, env)
            except ReadError as e:
                print_exception(e)
                continue
            if isinstance(result, Error):
                print_error(result)
            print pprint(result)

            env[Symbol('_')] = result
    except KeyboardInterrupt:
        print
Example #26
def __register_events():
    p = os.path.join(os.environ['TATOOL'], r'Common\Tools\ImportModule')
    if p not in sys.path:
        sys.path.append(p)

    import load

    project_path = '%s/../../Tools/' % os.path.dirname(
        os.path.realpath(__file__))
    for path in os.listdir(project_path):
        register_event_script_path = '%s/%s/Package/Scripts/registerEvents' % (
            project_path, path)
        if os.path.exists(register_event_script_path +
                          '.py') or os.path.exists(register_event_script_path +
                                                   '.pyc'):
            load.load('register_events', register_event_script_path)
Example #27
def submission(fname_net='net2.pickle'):
    with open(fname_net, 'rb') as f:
        net = pickle.load(f)     # net = specialists

    X = load.load(test=True)[0]
    y_pred = net.predict(X)
    print 'Finished predicting test file'
    columns = 'left_eye_center_x,left_eye_center_y,right_eye_center_x,right_eye_center_y,left_eye_inner_corner_x,left_eye_inner_corner_y,left_eye_outer_corner_x,left_eye_outer_corner_y,right_eye_inner_corner_x,right_eye_inner_corner_y,right_eye_outer_corner_x,right_eye_outer_corner_y,left_eyebrow_inner_end_x,left_eyebrow_inner_end_y,left_eyebrow_outer_end_x,left_eyebrow_outer_end_y,right_eyebrow_inner_end_x,right_eyebrow_inner_end_y,right_eyebrow_outer_end_x,right_eyebrow_outer_end_y,nose_tip_x,nose_tip_y,mouth_left_corner_x,mouth_left_corner_y,mouth_right_corner_x,mouth_right_corner_y,mouth_center_top_lip_x,mouth_center_top_lip_y,mouth_center_bottom_lip_x,mouth_center_bottom_lip_y'
    columns = columns.split(',')

    y_pred = y_pred * 48 + 48
    y_pred = y_pred.clip(0, 96)
    df = DataFrame(y_pred, columns=columns)

    lookup_table = read_csv(os.path.expanduser('./data/IdLookupTable.csv'))
    values = []

    for index, row in lookup_table.iterrows():
        values.append((
            row['RowId'],
            df.ix[row.ImageId - 1][row.FeatureName],
            ))

    now_str = datetime.now().isoformat().replace(':', '-')
    submission = DataFrame(values, columns=('RowId', 'Location'))
    filename = 'submission-{}.csv'.format(now_str)
    submission.to_csv(filename, index=False)
    print("Wrote {}".format(filename))
Example #28
 def setUp(self):
     self.l = load("intents.json")
     self.data = self.l.getData()
     for t in self.data["intents"]:
         if t['tag'] == "greeting":
             self.responses = t['responses']
         if t["tag"] == "music":
             self.responsesmusic = t['responses']
Example #29
def va(request):
    keyW = request.GET.get('varity', default='热血')

    items = []
    items = load.load(items, keyW)
    random.shuffle(items)
    resp = {'data': items}
    return HttpResponse(json.dumps(resp), content_type="application/json")
Example #30
def main():
    gs, gs_target = load()
    #envSettings = dict(MKL_NUM_THREADS=1, NUMEXPR_NUM_THREADS=1, OMP_NUM_THREADS=1)

    with timed_block('TP penalty gauge opt'):
        gs_gaugeopt = pygsti.gaugeopt_to_target(gs, gs_target,
                                                item_weights={'spam' : 0.0001, 'gates':1.0},
                                                TPpenalty=1.0)
Example #31
 def create_embeddings(self, image_folder):
     features_deque = deque()
     names_deque = deque()
     for raw_images, file_paths, total in load(image_folder):
         features_deque.extend(self.get_features(raw_images))
         names_deque.extend(file_paths)
     return np.array(names_deque, np.unicode_), \
         np.array(features_deque, np.float32)
Example #32
def main():

    dat = load.load()
    x = dat[:, 1]
    y = dat[:, 2]
    tmp = matmake(x, y, 3)

    display(x, y, calc(tmp[0], tmp[1]), 3)
Example #33
def main():
    try:
        cmd = sys.argv[1]
        assert(cmd in cmds)
    except (IndexError, AssertionError):
        invocations = ["python pypitches.py {0}".format(cmd) for cmd in cmds]
        print "usage:  " + "\n        ".join(invocations)
        sys.exit()
    if cmd == 'initdb':
        setup_postgres.initdb(postgres_db, postgres_user, postgres_password)
        sys.exit()
    else:
        import model
        model.SessionManager.create(postgres_db, postgres_user, postgres_password)
        import load
        from web.app import app
        import select_gamedirs
        from plot_pitch_locations import do_plots

    if cmd == 'web':
        app.run()
    elif cmd == 'webtest':
        app.run('pypitchestest', 'pypitches', 'slider')
    elif cmd == 'ipython':
        embed()
    elif cmd == 'file':
        # will generate output by a config file
        # a la plot_pitch_locations.py
        assert len(sys.argv) > 2, "usage: python pypitches.py file file.yaml"
        do_plots(sys.argv[2])

    elif cmd == 'download':
        # hit the MLBAM server and get it all
        pass
    elif cmd == 'classify':
        with model.SessionManager.get().begin_nested():
            static_dir = sys.argv[2]
            select_gamedirs.classify_local_dirs_by_filesystem(static_dir)
        model.SessionManager.commit()
    elif cmd == 'load':
        statuses=set(sys.argv[2:]) or set(['final'])
        with model.SessionManager.get().begin_nested():
            load.load(statuses)
        model.SessionManager.commit()
Example #34
def main():
    X, Y = load('train.csv')
    adaboost = AdaBoostClassifier(n_estimators=150, learning_rate=0.1)
    adaboost.fit(X, Y)
    X_test, ID = loadTest('test.csv')
    target = adaboost.predict_proba(X_test)
    df = pandas.DataFrame()
    df['TARGET'] = target[:,1]
    df.index = pandas.Series(ID, name='ID')
    df.to_csv('sumbit.csv')
Example #35
 def __init__(self, remoteShell, domainAdmin="admin", domain=None):
     self.remoteShell = remoteShell
     self.vastoolPath = "/opt/quest/bin/vastool"     
     self.domainAdmin = domainAdmin
     self.defaultDomain = domain
     
     self.info = info.info(self.run)
     self.flush = flush.flush(self.run)
     self.create = create.create(self.run, self.defaultDomain)
     self.delete = delete.delete(self.run)
     self.timesync = timesync.timesync(self.run)
     self.nss = nss.nss(self.run)
     self.group = group.group(self.run)
     self.isvas = isvas.isvas(self.run)
     self.list = list.list(self.run)
     self.auth = auth.auth(self.run, self.defaultDomain)
     self.cache = cache.cache(self.run)
     self.configure = configure.configure(self.run)
     self.configureVas = configureVas.configureVas(self.run)
     self.schema = schema.schema(self.run)
     self.merge = merge.merge(self.run)
     self.unmerge = unmerge.unmerge(self.run)
     self.user = User.user(self.run)
     self.ktutil = ktutil.ktutil(self.run)
     self.load = load.load(self.run)
     self._license = License.License(self.run)
     self.License = self._license.License
     self.parseLicense = self._license.parseLicense
     self.compareLicenses = self._license.compareLicenses
     #self.vasUtilities = vasUtilities.vasUtilities(self.remoteShell)
     self.unconfigure = unconfigure.unconfigure(self.run)
     self.nssdiag = nssdiag(self.run)
     
     isinstance(self.info, info.info)
     isinstance(self.flush, flush.flush)
     isinstance(self.create, create.create)
     isinstance(self.delete, delete.delete)
     isinstance(self.timesync, timesync.timesync)
     isinstance(self.nss, nss.nss)
     isinstance(self.group, group.group)
     isinstance(self.isvas, isvas.isvas)
     isinstance(self.list, list.list)
     isinstance(self.auth, auth.auth)
     isinstance(self.cache, cache.cache)
     isinstance(self.configure, configure.configure)
     isinstance(self.configureVas, configureVas.configureVas)
     isinstance(self.schema, schema.schema)
     isinstance(self.merge, merge.merge)
     isinstance(self.unmerge, unmerge.unmerge)
     isinstance(self.user, User.user)
     isinstance(self.ktutil, ktutil.ktutil)
     isinstance(self.load, load.load)
     #isinstance(self.vasUtilities, vasUtilities.vasUtilities)
     isinstance(self.unconfigure, unconfigure.unconfigure)
     isinstance(self.nssdiag, nssdiag)
Example #36
def main():
    from load import load
    import matplotlib.pyplot as plt

    im = load('data/doodle.txt')
    plt.imshow(im)
    plt.show()

    from tom import sol1
    test = sol1(im)
    return test
Example #37
 def loads(self):
     """
     Insert the plugin objects into the thread pool's task queue.
     """
     tempRes = load.load()
     self.maxCount = len(tempRes)  # record the maximum number of tasks
     if self.maxCount == 0:
         return False
     for mod in tempRes:
         self.__tp.addTask(tempRes[mod])
     return True
Example #38
def main():
    X, Y = load('train.csv')
    boost = GradientBoostingClassifier(n_estimators=150, verbose=1, max_features=8, max_depth=3)

    boost.fit(X, Y)
    X_test, ID = loadTest('test.csv')
    target = boost.predict_proba(X_test)
    df = pandas.DataFrame()
    df['TARGET'] = target[:,1]
    df.index = pandas.Series(ID, name='ID')
    df.to_csv('sumbit.csv')
Example #39
def compete(submit=False, history=10):
  test = load('test.csv')
  predicted = []
  beta0 = None

  i = 0
  for month, month_test in test.groupby('monthage'):
    i += 1
    #if i != 20:
    #  continue

    month_test = month_test.copy()
    # Use only last history months.
    #month_train = train[(train['monthage'] <= month) & (train['monthage'] > month - history)]
    month_train = train[train['monthage'] <= month]

    nbsm = NonlinearBikeShareModel(month_train)

    #if beta0 is None:
    #  beta0 = nbsm.beta0()

    res = optimize.minimize(nbsm, nbsm.beta0(), method='L-BFGS-B', jac=True, bounds=nbsm.bounds(), options={'disp': False, 'maxiter': 10000})
    #res = optimize.basinhopping(
    #  nbsm,
    #  nbsm.beta0(),
    #  minimizer_kwargs={
    #    'method':'L-BFGS-B',
    #    'jac':True,
    #    'bounds': nbsm.bounds(),
    #    'options': {'disp': False, 'maxiter': 500}
    #  },
    #  disp=True,
    #  niter=10
    #)
    print '%s training error:' % month, nbsm(res.x)[0]

    month_test['count'] = nbsm.predict(month_test, res.x)
    #nbsm.show(res.x)
    predicted.append(month_test)
    #beta0 = res.x

  predicted = pandas.concat(predicted)

  plt.plot_date(train['dates'], train['count'], label='train')
  plt.plot_date(predicted['dates'], predicted['count'], label='predicted')
  plt.legend()
  plt.show()
  plt.clf()

  if submit:
    submission = pandas.DataFrame()
    submission['datetime'] = test['dates']
    submission['count'] = predicted['count'] #.round().astype('int')
    submission.to_csv('submission-' + datetime.datetime.now().strftime('%Y-%m-%d-%H-%M') + '.csv', index=False)
Example #40
	def calorie(items, quantity):
		food = load()
		totalCal = 0
		totalQuantity = 0
		for i in range(len(items)):
			for j in range(len(food.items)):
				l = food.items[j].split(',')
				if l[0].split('\"')[1] == items[i]:
					totalCal = totalCal + int(l[2].split("\"")[1]) * int(quantity[i])
					totalQuantity = totalQuantity + int(quantity[i])

		return (float(totalCal) / float(totalQuantity))
Example #41
    def plot(self, filename, centroid, step):

        X = load(filename)
        dist = [[norm(x-c) for c in centroid] for x in X]
        clusters = np.argmin(dist, 1)

        cm = get_cmap('jet', self.K)
        fig = plt.figure()
        plt.scatter(X[:,0], X[:,1], marker = 'o', c=clusters, cmap=cm)
        plt.scatter(centroid[:,0], centroid[:,1], marker='o', linewidths=10)
        fig.show()
        plt.savefig('../figures/'+self.name +'_' + step + '.eps')
Example #42
File: p1.py Project: NPSDC/Codes
def main():
	data = load('bank-additional-full.csv')
	tot_no_rows = len(data)
	col_names = ["age", "job", "marital", "education", "default", "housing", "loan", "contact", "month", "day_of_week", "duration", "campaign", "pdays", "previous", "poutcome", "empvarrate", "conspriceidx", "consconfidx", "euribor3m", "nremployed", "y"]
	accuracy = np.empty([10, ], dtype=float)
	no_of_iteration = 10
	for i in xrange(no_of_iteration):
		accuracy[i] = do_computation(data, col_names, tot_no_rows)
	print accuracy
	mean = np.mean(accuracy)
	st_dev = np.std(accuracy)
	print mean, st_dev
	'''for key in unique_yes.keys():
Example #43
def run(dataset=None, filename=None, path="./Data", sep=",", minsupport=0.1, min_factor=0.5):

    # reading binarize file(changing binarize file into transaction)
    filepath = path + "/" + dataset + "/" + filename
    load.load(dataset=dataset, filename=filename)

    # reading binarized transactions
    transpath = path + "/" + dataset + "/" + "trans.json"
    with open(transpath, "r") as fp:
        d = json.load(fp)

    # running apriori with minsupport to get frequency sets
    l, support_data, c, f = apriori.apriori(d, minsupport=minsupport)
    # print("l is",l)
    # print("support is",support_data)

    # printing frequency set with support
    """
	filepath=path + "/" + dataset + "/support_" + str(minsupport) + ".csv"
	writer = csv.writer(open(filepath, 'wb'))
	for key, value in support_data.items():
	   	writer.writerow([list(key)[:], value])
	"""

    # generating maximal and closed itemset
    print("# of candidate itemset is", c)
    print("# of frequent itemset is", f)
    s, sc = maximal_itemset.maximal(l)
    print("# of maximal frequent itemset", sc)
    c, cc = closed_itemset.closed(l, support_data)
    print("# of closed frequent itemset is", cc)
    # print("support data is",support_data)
    # mining.generateRules(l,support_data)

    # generating rules
    # min_lift=min_factor
    # rules,noofrules=mining_lift.generateRules(l,support_data,min_factor=0.85)
    """
Example #44
def demo():
    """This is use-cases registry
    """
    aligned_sents = load('../../data/giza/alignment-en-fr', 'latin_1')
    use_case_1(aligned_sents)
    print
    use_case_2(1, 1, aligned_sents)
    print
    use_case_3(aligned_sents)
    print
    use_case_4(aligned_sents)
    print
    use_case_5(aligned_sents)
    print
    use_case_6(aligned_sents)
    print
    use_case_7()
    print
    use_case_8(aligned_sents)
Example #45
 def test_load_file(self):
     with open("constant.bp") as f:
         self.assertEqual(
             load.load(f),
             [
                 "@label@;constant",
                 "@forcing_function@;1.",
                 "@ideal@;0.5 * @forcing_function@ * ((x - x_l)**2 - "
                 "(x - x_l)*(x_r - x_l)) + u_l + u_r * "
                 "(x - x_l) / (x_r - x_l)",
                 "@n@;11",
                 "@label@;default",
                 "@forcing_function@;1.",
                 "@x_l@;0.",
                 "@x_r@;1.",
                 "@u_l@;0.",
                 "@u_r@;0",
                 "@n@;11",
             ],
         )
Example #46
File: rt.py Project: bodil/lolisp
 def load(self, scope, stream):
   for sexp in load(stream):
     self.execute(scope, sexp)
Example #47
import numpy as np
import re

from load import load

arr = load('data/doodle.txt')
n = arr.shape[0]
m = arr.shape[1]
arr2 = np.zeros((n, m)) 

with open('output2.txt') as f:
    instr_nbr = int(f.readline())
    instr_it = f.readlines()
    for i, instr in enumerate(instr_it):
        line = instr.split(' ')
        if(line[0] == "PAINTSQ"):
            r = int(line[1])
            c = int(line[2])
            s = int(line[3])
            for k in range(-s, s+1):
                for p in range(-s, s+1):
                    arr2[r+k][c+p] = 1
        elif(line[0] == "ERASECELL"):
            r = int(line[1])
            c = int(line[2])
            arr2[r][c] = 0

result = True
for k in range(0, n):
    for p in range(0, m):
        result = result & (arr[k, p] == arr2[k, p])
Example #48
import SETTINGS
import logging

import sqlaload as sl

from extract import extract
from entities import create_entities, update_entities
from load import load
from setup import setup, make_grano
from transform import transform
from network_entities import update_network_entities


if __name__ == '__main__':
    import sys
    logging.basicConfig(level=logging.DEBUG)
    assert len(sys.argv) == 3, "Usage: %s [ir_source_file] [ap_source_file]" % sys.argv[0]
    ir_source_file = sys.argv[1]
    ap_source_file = sys.argv[2]
    engine = sl.connect(SETTINGS.ETL_URL)
    extract(engine, ir_source_file, ap_source_file)
    update_network_entities(engine, 'network_entities.csv')
    create_entities(engine)
    update_entities(engine, 'entities.csv')
    transform(engine)
    grano = make_grano()
    setup(engine, grano)
    load(engine, grano)
Example #49
import numpy as np

test = [[48.838566,2.332006], [48.830487,2.356382], [48.850484,2.283855], 
		[48.873071,2.294498], [48.891527,2.334667], [48.887972,2.379814],
		[48.867594,2.40007], [48.861553,2.335525]]
test = np.array(test)


def find_spread_point(G, N):
    sp = np.zeros(8)
    min_d = np.ones(8)*100000000000000
    for i in range(N):
        for j in range(8):
            l = sum((G.node[i]['pos'] - test[j])**2)
            if min_d[j] > l:
                min_d[j] = l
                sp[j] = i
    return sp


if __name__ == '__main__':
    from load import load
    G, params = load()
    s_points = find_spread_point(G, params[0])
Example #50
print 'CSE 5243 Similarity Analysis by Kun Liu & Zhe Dong'

import load
import minhash
import time

begin_time = time.time()

(Data, SparseData, Cnt, rdim, cdim) = load.load()

#convert data into 0/1
#for i in range(0, rdim):
#	for j in range(0, cdim):
#		if Data[i][j] != 0:
#			Data[i][j] = 1
#print "done1"

#import sklearn.metrics.pairwise
#Pair_Wise_Dist = sklearn.metrics.pairwise.pairwise_distances(Data,metric='jaccard')
#print "done2"

#Mean Squared Error
def similarity_MSE(true_sim, hash_sim):
    mse = 0
    for i in range(0, len(true_sim)):
        for j in range(0, len(true_sim[i])):
            mse += pow(true_sim[i][j] - hash_sim[i][j], 2)

    return mse / len(true_sim) / len(true_sim[0])
    
#compute true similarity
Example #51
    input_shape=(None, 9216),  # 96x96 input pixels per batch
    hidden_num_units=100,  # number of units in hidden layer
    output_nonlinearity=None,  # output layer uses identity function
    output_num_units=28,  # 28 target values

    # optimization method:
    update=nesterov_momentum,
    update_learning_rate=0.005,
    update_momentum=0.9,

    regression=True,  # flag to indicate we're dealing with regression problem
    max_epochs=400,  # we want to train this many epochs
    verbose=1,
    )

X, y = load()
print "Done loading"
print("X.shape == {}; X.min == {:.3f}; X.max == {:.3f}".format(
    X.shape, X.min(), X.max()))
print("y.shape == {}; y.min == {:.3f}; y.max == {:.3f}".format(
    y.shape, y.min(), y.max()))
net1.fit(X, y)

# Training for 400 epochs will take a while.  We'll pickle the
# trained model so that we can load it back later:
import cPickle as pickle
with open('net1.pickle', 'wb') as f:
    pickle.dump(net1, f, -1)

train_loss = np.array([i["train_loss"] for i in net1.train_history_])
valid_loss = np.array([i["valid_loss"] for i in net1.train_history_])
Example #52
import tensorflow as tf
import numpy as np
from load import load
from collections import defaultdict
import random

epoch_count = 10
batch_size = 128

alphabet = "abcdefghijklmnopqrstuvwxyz0123456789-,;.!?:'\"/\\|_@#$%^&*~`+-=<>()[]{}"
alphabet_size = len(alphabet)
max_text_len = 1014

class_num, train_set, test_set = load("AG", alphabet, max_text_len)

# char cnn
def weight_init(shape):
    initial = tf.random_normal(shape, mean=0.0, stddev=0.02, dtype=tf.float32)
    return tf.Variable(initial)

# batch x max_text_len
x = tf.placeholder(tf.int32, [None, max_text_len])
y = tf.placeholder(tf.int32, [None])

x_onehot = tf.one_hot(x, alphabet_size)
y_onehot = tf.one_hot(y, class_num)

hidden_size = 20
input_list = tf.unpack(x_onehot, axis=1)
cell = tf.nn.rnn_cell.GRUCell(hidden_size)
# state = cell.zero_state(...)
Example #53
import os
import sys
from load import load

for root, dirs, files in os.walk(sys.argv[1]):
    for f in files:
        load(os.path.join(root, f))
Example #54
print 'CSE 5243 Associate Rule Analysis by Kun Liu & Zhe Dong'

#load data
import load
(Data,Label,cdim) = load.load()

for i in range(0,5):
	
	#split data
	Train_Data = []
	Train_Label = []
	for j in range(0,5):
		if j!=i:
			Train_Data += Data[j]
			Train_Label += Label[j]
	Test_Data = Data[i]
	Test_Label = Label[i]

	#convert data into apriori format
	F1 = open('train'+str(i), 'w')
	for j in range(0, len(Train_Data)):
		#print 'doc #' + str(j) + ' with ' + str(len(Train_Data[j])) + 'keywords'
		for k in range(0, len(Train_Data[j])):
			F1.write(str(Train_Data[j][k]) + ' ')
		F1.write(Train_Label[j] + '\n')

	#generate appearance.txt
	F2 = open('appearance'+str(i), 'w')
	F2.write('antecedent\n')
	Label_Set = set(Train_Label)
	for label in Label_Set:
Example #55
from load import load
from rt import RT, LispException

import sys, os.path
from argparse import ArgumentParser

if __name__ == "__main__":

  arg_parser = ArgumentParser(description = "Run you a lisp!")
  arg_parser.add_argument("file", nargs = "?", help = "Lolisp source file to run")
  args = arg_parser.parse_args()

  rt = RT()
  rt.load(rt.ns, file(os.path.join(sys.path[0], "rt.loli")))

  if args.file:
    rt.load(rt.ns, file(args.file))
  else:
    while 1:
      print ">>> ",
      s = sys.stdin.readline()
      if not s:
        break
      sexps = load(s)
      for sexp in sexps:
        try:
          print "=> %s" % repr(rt.execute(rt.ns, sexp))
        except LispException as e:
          print "***", str(e)
Example #56
import numpy as np
import cPickle as pickle

from pyhsmm.util.text import progprint_xrange
from pyhsmm.util.stats import whiten, cov

import autoregressive.models as models
import autoregressive.distributions as distributions

from load import load

np.random.seed(0)


### load

data = load()
data = whiten(data - data.mean(0))

ndim = data.shape[1]

### model

Nmax = 20
affine = True
nlags = 1

model = models.FastARWeakLimitStickyHDPHMM(
    alpha=10., gamma=10.,
    kappa=1e4,
    init_state_distn='uniform',
    obs_distns=[
Example #57
    def __init__(self):
        self.t = init_t
        self.count = 0

    def clearCount(self):
        self.count = 0

    def updateT(self, total_f):
        t_history = self.t
        self.t = self.count / total_f
        return abs(t_history - self.t)
        

if __name__ == "__main__":
    # The collection of alignedSent should be provided by other team
    aligned_sents = load('../../data/giza/alignment-en-fr', 'latin_1')

    # generate word sets from corpus
    e = set()
    f = set()
    for aligned_sent in aligned_sents:
        for e_w in aligned_sent.sent1:
            e.add(e_w)
        for f_w in aligned_sent.sent2:
            f.add(f_w)
    f.add(NULL_TOKEN)

    entry_count = len(e) * len(f)
    e_given_f = defaultdict(TableCell)
    total = defaultdict(float)
    s_total = defaultdict(float)
Example #58
			l = line.split(',')
			l = l[0].split("\"")
			if l[1] != name:
				ans = ans + copy
			else:
				print "Match Found!!! Entry Deleted!!!"

		fp.close()
		os.remove("./files/basicItems.csv")
		fp = open("./files/basicItems.csv", 'w+')
		fp.write(ans)
		fp.close()
	
	@staticmethod
	def search(keyword):
		print "Entries Found:"
		fp = open("./files/basicItems.csv", 'r+')
		for line in fp:
			l = line.split(',')[1].split('\"')[1].split(':')
			if any(keyword in s for s in l):
				print line,
		fp.close()
		

a = basic()
a.search("fruit")
#a.storeFood("ddsd", "122")
#a.readFood("jam")
#a.deleteFood("jam")
b = load()