Code example #1
    def DSS(self, split_type, prob):
        """
		split_type is either "splitting", or "sampling" with default values for fold1=fold2=5,
		or ["sampling", fold1, fold2]
		"""
        self.split_type = split_type
        self.prob = prob

        if not isinstance(self.split_type, list):
            self.split_type = [self.split_type]

        self.split_type[0] = self.split_type[0].lower()

        x, t = self.data[0], self.data[1]
        if self.split_type[0] == "splitting":
            subsample = np.random.binomial(1, self.prob,
                                           size=self.n).reshape(-1, 1)
            subsample = np.hstack(((subsample == 1), (subsample == 0)))
            fold1, fold2 = 1, 1

        elif self.split_type[0] == "sampling":
            N = int(self.prob * self.n)
            if len(self.split_type) == 3:
                fold1, fold2 = self.split_type[1], self.split_type[2]
            elif len(self.split_type) == 1:
                fold1, fold2 = 5, 5
            else:
                raise ValueError(
                    "split_type is either a string, or a list of size 1 or size 3."
                )

            subsample = np.random.choice(np.arange(self.n),
                                         size=(N, fold1 + fold2),
                                         replace=True)
        else:
            raise ValueError('split_type must be "splitting" or "sampling".')

        w_train = np.zeros((self.p, 1))
        # A separate array is required here: `w_tilde = w_train` would alias
        # the same buffer, so the in-place `+=` below would corrupt both sums.
        w_tilde = np.zeros((self.p, 1))

        for j in range(fold1):
            model1 = models.Model(self.model, self.params, **self.kwargs)

            x_train, t_train = x[subsample[:, j], :], t[subsample[:, j]]
            w_train += model1.fit(x_train, t_train).w.reshape(-1, 1)

        for j in range(fold1, fold1 + fold2):

            model2 = models.Model(self.model, self.params, **self.kwargs)

            x_tilde, t_tilde = x[subsample[:, j], :], t[subsample[:, j]]
            w_tilde += model2.fit(x_tilde, t_tilde).w.reshape(-1, 1)

        # Taking the average of estimates over multiple samples or multiple
        # splittings. When we divide the dataset in half, fold1 = fold2 = 1.
        self.w_train = w_train / fold1
        self.w_tilde = w_tilde / fold2

        self.w = np.vstack((self.w_train, self.w_tilde))

        return self
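
A minimal usage sketch (an assumption, not part of the source): it supposes a hypothetical wrapper class Estimator exposing the attributes DSS reads (data, n, p, model, params, kwargs).

import numpy as np

n, p = 200, 10
X = np.random.randn(n, p)   # design matrix
t = np.random.randn(n)      # targets
est = Estimator(model="ols", data=(X, t))  # Estimator is a hypothetical wrapper exposing DSS

# Half/half splitting: one subsample per fold (fold1 = fold2 = 1).
est.DSS(split_type="splitting", prob=0.5)

# Repeated subsampling: 5 + 5 bootstrap folds, each of size 0.8 * n.
est.DSS(split_type=["sampling", 5, 5], prob=0.8)

print(est.w.shape)  # (2p, 1): w_train stacked on top of w_tilde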
Code example #2
    def test_get_model_raises_MultipleMatches_when_several_patterns_match(
            self):
        model1 = models.Model(name='switch1', pattern='foo', oids={})
        model2 = models.Model(name='switch2', pattern='bar', oids={})
        config = models.Config(models=(model1, model2), default_oids={})

        with self.assertRaises(models.MultipleMatches):
            config.get_model('sysDescr includes both foo and bar patterns')
Code example #3
def confmodels():
    error = None
    form = ModelForm(request.form)
    if request.method == 'POST' and form.validate():

        ## save conf to database with the user_id
        new_model = models.Model(
            form.expnme.data,
            form.timmax.data,
            form.timeunit.data,
            #form.inttime.data,
            form.initdate.data,
            form.inithour.data,
            form.initminute.data,
            form.centlat.data,
            form.centlon.data,
            form.nnXYp.data,
            form.deltaXY.data,
            session['user_id'])

        db.session.add(new_model)
        db.session.commit()
        return redirect(url_for('allmodels'))

    return render_template("confmodels.html",
                           error=error,
                           form=form,  # reuse the validated instance so field errors survive the re-render
                           username=session['name'])
Code example #4
def write(bench_type, file_name="out.txt"):
    model = models.Model("737")
    if bench_type == "Random":
        nodes = BoardingMethods.Random()
    elif bench_type == "WindowMA":
        nodes = BoardingMethods.WindowMA()
    elif bench_type == "WMAGroups":
        nodes = BoardingMethods.WMAGroups()
    elif bench_type == "BackToFront":
        nodes = BoardingMethods.BackToFront()
    elif bench_type == "FrontToBack":
        nodes = BoardingMethods.FrontToBack()
    elif bench_type == "SteffenOptimal":
        nodes = BoardingMethods.SteffenOptimal()
    elif bench_type == "SteffenModified":
        nodes = BoardingMethods.SteffenModified()
    elif bench_type == "SimulationOptimal":
        nodes = BoardingMethods.SimulationOptimal()
    else:
        print("Invalid benchmark type")
        return 0

    text_file = open(file_name, "w")  # "w" truncates, so a separate truncate-then-append is unnecessary
    ticks_elapsed = 0
    while isDone(nodes) == 0:
        BoardingSimulator.generate_next(model, nodes)
        ticks_elapsed += 1
        text_file.write(model.str())
        text_file.write(("Tick: " + str(ticks_elapsed)).rjust(63) + "\n")
        text_file.write("[frame]\n")
    text_file.close()
    print("File written: " + file_name)
Code example #5
def upload_mesh(db, object_id, original_path, cloud_path=None, mesh_path=None):
    import models
    r = models.find_model_for_object(db, object_id, 'mesh')
    m = None
    for model in r:
        m = models.Model.load(db, model)
        print "updating model:", model
        break
    if not m:
        m = models.Model(object_id=object_id, method='mesh')
        print "creating new model."
    m.store(db)
    with open(original_path, 'r') as mesh:
        db.put_attachment(m,
                          mesh,
                          filename='original' +
                          os.path.splitext(original_path)[1])
    if cloud_path:
        with open(cloud_path, 'rb') as mesh:
            db.put_attachment(m,
                              mesh,
                              filename='cloud.ply',
                              content_type='application/octet-stream')
    #else:
    # TODO: convert the original to mesh/cloud if not given
    if mesh_path:
        with open(mesh_path, 'rb') as mesh:
            db.put_attachment(m,
                              mesh,
                              filename='mesh.stl',
                              content_type='application/octet-stream')
Code example #6
File: train.py Project: zoq/AdaCoF-pytorch
def main():
    args = parser.parse_args()
    torch.cuda.set_device(args.gpu_id)

    dataset = DBreader_Vimeo90k(args.train,
                                random_crop=(args.patch_size, args.patch_size))
    TestDB = Middlebury_other(args.test_input, args.gt)
    train_loader = DataLoader(dataset=dataset,
                              batch_size=args.batch_size,
                              shuffle=True,
                              num_workers=0)
    model = models.Model(args)
    loss = losses.Loss(args)

    start_epoch = 0
    if args.load is not None:
        checkpoint = torch.load(args.load)
        model.load(checkpoint['state_dict'])
        start_epoch = checkpoint['epoch']

    my_trainer = Trainer(args, train_loader, TestDB, model, loss, start_epoch)

    now = datetime.datetime.now().strftime('%Y-%m-%d-%H:%M:%S')
    with open(args.out_dir + '/config.txt', 'a') as f:
        f.write(now + '\n\n')
        for arg in vars(args):
            f.write('{}: {}\n'.format(arg, getattr(args, arg)))
        f.write('\n')

    while not my_trainer.terminate():
        my_trainer.train()
        my_trainer.test()

    my_trainer.close()
Code example #7
def test__validate_assortativity():
    """Testing validation of assortativity attribute."""

    # assortativity must be either 'positive' or 'negative'
    invalid_assortativity = 'invalid_assortativity'

    with nose.tools.assert_raises(AttributeError):
        models.Model(invalid_assortativity, workers, firms, valid_F,
                     valid_F_params)

    # assortativity must be a string
    invalid_assortativity = 0.0

    with nose.tools.assert_raises(AttributeError):
        models.Model(invalid_assortativity, workers, firms, valid_F,
                     valid_F_params)
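
For contrast, a valid construction passes one of the two accepted strings (mirroring the fixtures these tests already use):

valid_model = models.Model('positive', workers, firms, valid_F, valid_F_params)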
Code example #8
def handle_new_tread(request):
    #lib.utils.get_board_name_from_referer(request['HTTP_REFERER'])
    request.POST["pname"] = wakaba(strip_tags(request.POST["pname"]))
    request.POST["pmail"] = wakaba(strip_tags(request.POST["pmail"]))
    request.POST["ptitle"] = wakaba(strip_tags(request.POST["ptitle"]))
    request.POST["ppost"] = wakaba(strip_tags(request.POST["ppost"]))
    model = models.Model()
    if request.POST["pname"] == '':
        request.POST["pname"] = "Anonymous"

    NewRecord = models.get_simple_record(name=request.POST["pname"],
                                         email=request.POST["pmail"],
                                         title=request.POST["ptitle"],
                                         post=request.POST["ppost"])
    # resolve the board name once instead of re-parsing the referer three times
    board_name = lib.utils.get_board_name_from_referer(request['HTTP_REFERER'])
    MyBoard = models.get_simple_board(adr=board_name)
    model.add_new_tread_to_board_by_record(NewRecord, MyBoard)
    newstr = str(board_name) + ", " + str(request.POST["pname"])
    con = {
        "lol": newstr,
        "boardname": board_name
    }
    return HttpResponse(Template('treadcreated.html').render(Context(con)))
Code example #9
File: first.py Project: TvoroG/PyImgBoard
def board(request, name):
    model = models.Model()
    all_categorys = model.get_all_categorys()
    all_boards = []
    for category in all_categorys:
        boards_from = model.get_all_boards_from_category(category)
        all_boards += boards_from

    board = model.get_board_by_adr(adr=name)
    all_treads = model.get_all_treads_by_date(board)

    class obert():
        def __init__(self, mid, name):
            self.id = mid
            self.name = name

    id_all_treads = [x.id for x in all_treads]
    treads = []
    for tread in all_treads:
        treads.append(obert(tread.id,
                            model.get_all_records_from(tread, board)))

    template = Template('boards.html')
    context = Context({'treads': treads, 'board': board, 
                       'all_boards': all_boards})
    return HttpResponse(template.render(context))
Code example #10
File: mStructures.py Project: nrnhines/bfilt
    def __init__(self):
        fitglobals.debugoff()
        P = noise.NoiseParams()
        P.tstop = 20
        P.dt = 0.1
        A0 = 0.5
        A1 = 0.1
        P.A = numpy.matrix([[A0, 0], [0, A1]])
        # P.B (next line) defines the noise injected into each component; uncorrelated
        P.B = numpy.matrix([[0.5, 0], [0, 0.4]])
        P.InitialCov = numpy.matrix([[1, 0], [0, 1]])
        elist = numpy.arange(0.1, 20, 0.1).tolist()
        elist2 = numpy.arange(0.05, 20.0, 0.05).tolist()
        O1 = models.ObserveState0(P, 5)
        O2 = models.ObserveStateSum(P, 6)
        # O1.Times.set([2,4,6,8,10,12,14,16,18,20])
        # O2.Times.set([3,6,9,12,15,18])
        O1.Times.set(elist)
        O2.Times.set(elist)
        O1.sigma = 0.001
        O2.sigma = 0.0001
        Obs = models.ObservationModel(P, 5, [O1, O2])
        Sys = models.DecayModel(P, 0, 2)
        # Sys.Injection.set([1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20])
        Sys.Injection.set(elist2)
        Initial = numpy.matrix([[1.0], [2.0]])
        self.M = models.Model(Sys, Obs, P, Initial)
        self.sim(False)
Code example #11
    def __init__(self):
        """Init."""
        QMainWindow.__init__(self)
        Ui_MainWindow.__init__(self)
        self.setupUi(self)
        self.horizontalSliderElements.setVisible(False)
        self.labelLimit.setVisible(False)
        self.lineEditElements.setVisible(False)
        self.lineEditElements.setText("512")
        self.listWidget.addItems(listModels())
        self.model = models.Model()
        self.loadMaterials()
        self.loadSections()
        self.loadSectionImage()
        self._isUp2date = True
        self._showAgain = True

        self.dockWidget.topLevelChanged.connect(self.updateWindowSize)
        self.listWidget.currentTextChanged.connect(self.modelChanged)
        self.comboBoxMaterials.currentTextChanged.connect(self.materialChanged)
        self.comboBoxSections.currentTextChanged.connect(self.sectionChanged)
        self.comboBoxConditions.currentTextChanged.connect(
            self.conditionsChanged)
        self.comboBoxResults.currentTextChanged.connect(self.updateGraph)
        self.horizontalSliderElements.valueChanged.connect(
            self.elementsNumberChanged)
        self.pushButtonStartComputation.clicked.connect(self.compute)
        self.pushButtonSave.clicked.connect(self.saveFigure)
        self.pushButtonExcel.clicked.connect(self.saveExcel)
        self.pushButtonPlotMatrix.clicked.connect(self.plotMatrix)
        self.mpl.canvas.depassement.connect(self.depassement)
        self.initWatchDog()
Code example #12
def main():
    args = parser.parse_args()
    torch.cuda.set_device(args.gpu_id)

    config_file = open(args.config, 'r')
    while True:
        line = config_file.readline()
        if not line:
            break
        if line.find(':') == 0:
            continue
        else:
            tmp_list = line.split(': ')
            if tmp_list[0] == 'kernel_size':
                args.kernel_size = int(tmp_list[1])
            if tmp_list[0] == 'flow_num':
                args.flow_num = int(tmp_list[1])
            if tmp_list[0] == 'dilation':
                args.dilation = int(tmp_list[1])
    config_file.close()

    model = models.Model(args)

    checkpoint = torch.load(args.checkpoint, map_location=torch.device('cpu'))
    model.load(checkpoint['state_dict'])

    frame_name1 = args.first_frame
    frame_name2 = args.second_frame

    frame1 = to_variable(transform(Image.open(frame_name1)).unsqueeze(0))
    frame2 = to_variable(transform(Image.open(frame_name2)).unsqueeze(0))

    model.eval()
    frame_out = model(frame1, frame2)
    imwrite(frame_out.clone(), args.output_frame, range=(0, 1))
Code example #13
def _test(args):
    mention = "Deutsche Bundesbank balance of payments statistics"
    text = "We use the Deutsche Bundesbank balance of payments statistics as our main source of data."
    assert args.load
    test_fname = args.eval_data
    data_gens = get_datasets([(test_fname, 'test', args.goal)], args, text, mention)
    model = models.Model(args, constant.ANSWER_NUM_DICT[args.goal])
    model.cuda()
    model.eval()
    load_model(args.reload_model_name, constant.EXP_ROOT, args.model_id, model)

    for name, dataset in [(test_fname, data_gens[0])]:
        # print('Processing... ' + name)
        total_gold_pred = []
        total_probs = []
        total_ys = []
        total_annot_ids = []
        for batch_num, batch in enumerate(dataset):
            eval_batch, annot_ids = to_torch(batch)
            loss, output_logits = model(eval_batch, args.goal)
            output_index = get_output_index(output_logits)
            output_prob = model.sigmoid_fn(output_logits).data.cpu().clone().numpy()
            y = eval_batch['y'].data.cpu().clone().numpy()
            gold_pred = get_gold_pred_str(output_index, y, args.goal)
            total_probs.extend(output_prob)
            total_ys.extend(y)
            total_gold_pred.extend(gold_pred)
            total_annot_ids.extend(annot_ids)
Code example #14
def _test(args):
    assert args.load
    test_fname = args.eval_data
    model = models.Model(args, constant.ANSWER_NUM_DICT[args.goal])
    model.cuda()
    model.eval()
    # load_model(args.reload_model_name, constant.EXP_ROOT, args.model_id, model)

    if args.goal == "onto":
        saved_path = constant.EXP_ROOT_ONTO
    else:
        saved_path = constant.EXP_ROOT
    model.load_state_dict(
        torch.load(saved_path + '/' + args.model_id +
                   '_best.pt')["state_dict"])

    data_gens = get_datasets([(test_fname, 'test', args.goal)],
                             args,
                             eval_epoch=1)
    for name, dataset in [(test_fname, data_gens[0])]:
        print('Processing... ' + name)

        batch = next(dataset)
        eval_batch, _ = to_torch(batch)
        loss, output_logits = model(eval_batch, args.goal)

        threshes = np.arange(0, 1, 0.02)
        # threshes = [0.65, 0.68, 0.7, 0.71]
        # threshes = [0.5]
        p_and_r = []
        for thresh in tqdm(threshes):
            total_gold_pred = []
            total_probs = []
            total_ys = []
            print('\nthresh {}'.format(thresh))
            output_index = get_output_index(output_logits, thresh)
            output_prob = model.sigmoid_fn(
                output_logits).data.cpu().clone().numpy()
            y = eval_batch['y'].data.cpu().clone().numpy()
            gold_pred = get_gold_pred_str(output_index, y, args.goal)

            total_probs.extend(output_prob)
            total_ys.extend(y)
            total_gold_pred.extend(gold_pred)
            # mrr_val = mrr(total_probs, total_ys)

            # json.dump(gold_pred, open('nomulti_predictions.json', 'w'))
            # np.save('y', total_ys)
            # np.save('probs', total_probs)

            # print('mrr_value: ', mrr_val)
            # result, eval_str = metric_dicts(total_gold_pred)
            result, eval_str = fine_grained_eval(total_gold_pred)

            # fine_grained_eval(total_gold_pred)

            p_and_r.append([result["ma_precision"], result["ma_recall"]])
            print(eval_str)

        np.save(saved_path + '/{}_pr_else_dev'.format(args.model_id), p_and_r)
Code example #15
def visualize(args):
    saved_path = constant.EXP_ROOT
    model = models.Model(args, constant.ANSWER_NUM_DICT[args.goal])
    model.cuda()
    model.eval()
    model.load_state_dict(
        torch.load(saved_path + '/' + args.model_id +
                   '_best.pt')["state_dict"])

    label2id = constant.ANS2ID_DICT["open"]
    # use a distinct name so the SummaryWriter does not shadow this function
    writer = SummaryWriter("../visualize/" + args.model_id)
    # label_list = ["person", "leader", "president", "politician", "organization", "company", "athlete","adult",  "male",  "man", "television_program", "event"]
    label_list = list(label2id.keys())
    ids = [label2id[_] for _ in label_list]
    if args.gcn:
        connection_matrix = model.decoder.label_matrix + model.decoder.weight * model.decoder.affinity
        label_vectors = model.decoder.transform(
            connection_matrix.mm(model.decoder.linear.weight) /
            connection_matrix.sum(1, keepdim=True))
    else:
        label_vectors = model.decoder.linear.weight.data

    interested_vectors = torch.index_select(
        label_vectors, 0,
        torch.tensor(ids).to(torch.device("cuda")))
    writer.add_embedding(interested_vectors,
                         metadata=label_list,
                         label_img=None)
Code example #16
File: first.py Project: TvoroG/PyImgBoard
def adminum(request):
    model = models.Model()
    if request.method == 'POST':
        try:
            if 'cname' in request.POST:
                model.add_new_category(models.get_simple_category(
                    lib.utils.get_normal_string(request.POST["cname"])))
            else:
                model.add_new_board_to_category(
                    models.get_simple_board(
                        lib.utils.get_normal_string(request.POST["bname"]),
                        lib.utils.get_normal_string(request.POST["badr"]),
                        request.POST["cat"]),
                    model.get_category_by_id(request.POST["cat"]))
        except KeyError:
            return HttpResponse(str(request.POST) + str('cname' in request.POST))

    categ = model.get_all_categorys()
    newcateg = []
    for cat in categ:
        catboard = model.get_all_boards_from_category(cat)
        newcateg.append({cat.name: catboard})

    return HttpResponse(Template('admin.html').render(Context({
        "categorys": categ,
        "categ_list": newcateg})))
Code example #17
def handle_new_record(request):
    model = models.Model()
    request.POST["pname"] = wakaba(strip_tags(request.POST["pname"]))
    request.POST["pmail"] = wakaba(strip_tags(request.POST["pmail"]))
    request.POST["ptitle"] = wakaba(strip_tags(request.POST["ptitle"]))
    request.POST["ppost"] = wakaba(strip_tags(request.POST["ppost"]))
    if request.POST["pname"] == '':
        request.POST["pname"] = "Anonymous"
    NewRecord = models.get_simple_record(name=request.POST["pname"],
                                         email=request.POST["pmail"],
                                         title=request.POST["ptitle"],
                                         post=request.POST["ppost"])
    MyBoard = models.get_simple_board(adr=request.POST["boardadr"])
    MyTread = model.get_tread_by_id(request.POST["treadid"], MyBoard)

    model.insert_record_into(MyTread, NewRecord, MyBoard)

    newstr = str(request.POST["boardadr"]) + ", в треде " + str(
        request.POST["treadid"]) + ", " + str(request.POST["pname"])
    con = {
        "lol": newstr,
        "treadname": request.POST["treadid"],
        "boardname": request.POST["boardadr"]
    }
    return HttpResponse(Template('recordcreated.html').render(Context(con)))
Code example #18
File: test.py Project: rafalsliwinski3991/BuyBase
    def __init__(self):
        self.cview = view.BuyBaseApp()

        self.cmod = models.Model(
            self.cview.frames[view.ConfigurationPage].userE.get(),
            self.cview.frames[view.ConfigurationPage].passE.get(),
            self.cview.frames[view.ConfigurationPage].hostE.get(),
            self.cview.frames[view.ConfigurationPage].dataE.get())

        # hook up the add-data button
        self.cview.frames[view.HomePage].addB.config(command=self.add_dataV)
        self.display_data()
        # CONFIGURATION PAGE----------------------------------------------------------------------------------------------
        # connect with database in configurationPage
        self.cview.frames[view.ConfigurationPage].connB.config(
            command=self.cmod_call)
        # clearing data in configuration page
        self.cview.frames[view.ConfigurationPage].clearB.config(
            command=self.cview.frames[view.ConfigurationPage].clear_click)
        # closing configuration page
        self.cview.frames[view.ConfigurationPage].closB.config(
            command=lambda: self.cview.show_frame(view.HomePage))
        # create the checkbuttons
        self.making_check()
        # create the plot
        self.make_plot()
Code example #19
    def test_lookup_oids(self):
        oids = {}
        oids['octets'] = models.OID(rx='abc.{ifIndex}', tx='def.{ifIndex}')
        model = models.Model(name='ok', pattern='foo', oids=oids)

        actual = model.lookup_oids('octets', '10')

        self.assertItemsEqual(('abc.10', 'def.10'), actual)
Code example #20
File: first.py Project: TvoroG/PyImgBoard
def b(request):
    template = Template('b.html')
    test = models.Model()
    tread = models.Tread(1, now_timestamp())
    testlist = test.get_all_records_from(tread)
    context = Context({'lol': testlist})
    result = template.render(context)
    return HttpResponse(result)
Code example #21
def main():
    args = parser.parse_args()
    torch.cuda.set_device(args.gpu_id)

    if args.config is not None:
        config_file = open(args.config, 'r')
        while True:
            line = config_file.readline()
            if not line:
                break
            if line.find(':') == 0:
                continue
            else:
                tmp_list = line.split(': ')
                if tmp_list[0] == 'kernel_size':
                    args.kernel_size = int(tmp_list[1])
                if tmp_list[0] == 'dilation':
                    args.dilation = int(tmp_list[1])
        config_file.close()

    model = models.Model(args)

    print('Loading the model...')

    checkpoint = torch.load(args.checkpoint, map_location=torch.device('cpu'))
    model.load(checkpoint['state_dict'])
    current_epoch = checkpoint['epoch']

    print('Test: Middlebury_eval')
    test_dir = args.out_dir + '/middlebury_eval'
    test_db = TestModule.Middlebury_eval('./test_input/middlebury_eval')
    if not os.path.exists(test_dir):
        os.makedirs(test_dir)
    test_db.Test(model, test_dir)

    print('Test: Middlebury_others')
    test_dir = args.out_dir + '/middlebury_others'
    test_db = TestModule.Middlebury_other(
        './test_input/middlebury_others/input',
        './test_input/middlebury_others/gt')
    if not os.path.exists(test_dir):
        os.makedirs(test_dir)
    test_db.Test(model, test_dir, current_epoch, output_name='frame10i11.png')

    print('Test: DAVIS')
    test_dir = args.out_dir + '/davis'
    test_db = TestModule.Davis('./test_input/davis/input',
                               './test_input/davis/gt')
    if not os.path.exists(test_dir):
        os.makedirs(test_dir)
    test_db.Test(model, test_dir, output_name='frame10i11.png')

    print('Test: UCF101')
    test_dir = args.out_dir + '/ucf101'
    test_db = TestModule.ucf('./test_input/ucf101')
    if not os.path.exists(test_dir):
        os.makedirs(test_dir)
    test_db.Test(model, test_dir, output_name='frame1.png')
Code example #22
    def test_oid_names(self):
        oids = {}
        oids['octets'] = models.OID(rx='abc', tx='def')
        oids['discards'] = models.OID(rx='ghi', tx='jkl')
        model = models.Model(name='ok', pattern='foo', oids=oids)

        actual = model.oid_names()

        self.assertItemsEqual(['octets', 'discards'], actual)
Code example #23
File: train.py Project: altosaar/vimco_tf
def train(config):
    """Train sigmoid belief net on MNIST."""
    cfg = config

    # data
    data_iterator, np_data_mean, _ = util.provide_data(cfg)
    input_data = tf.placeholder(cfg['dtype'],
                                [cfg['batch_size']] + cfg['data/shape'])
    data_mean = tf.placeholder(cfg['dtype'], cfg['data/shape'])
    input_data_centered = input_data - tf.expand_dims(data_mean, 0)
    tf.image_summary('data', input_data, max_images=cfg['batch_size'])

    def feed_fn():
        _, images = next(data_iterator)
        return {input_data: images, data_mean: np_data_mean}
        # return {input_data: np.random.binomial(1, 0.5, size=(cfg['batch_size'],28,28,1)), data_mean: 0. * np_data_mean + 0.5}

    model = models.Model(cfg)
    variational = models.Variational(cfg)
    inference = inferences.VariationalInference(cfg, model, variational,
                                                input_data,
                                                input_data_centered)
    model_vars = fw.get_model_variables('model')
    variational_vars = fw.get_model_variables('variational')
    summary_op = tf.merge_all_summaries()
    saver = tf.train.Saver()

    # train
    if not cfg['eval_only']:
        learn.train(graph=tf.get_default_graph(),
                    output_dir=cfg['log/dir'],
                    train_op=inference.train_op,
                    loss_op=tf.reduce_mean(inference.vimco_elbo),
                    feed_fn=feed_fn,
                    supervisor_save_summaries_steps=1000,
                    log_every_steps=1000,
                    max_steps=cfg['optim/n_iterations'])

    # evaluate likelihood on validation set
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint(cfg['log/dir']))
        np_log_x = 0.
        np_l = 0.
        cfg.update({'data': {'split': 'valid', 'n_examples': 10000}})
        data_iterator, np_data_mean, _ = util.provide_data(cfg)
        # integer division: range() requires an int
        for i in range(cfg['data/n_examples'] // cfg['data/batch_size']):
            _, images = next(data_iterator)
            tmp_np_log_x, tmp_np_l = sess.run(
                [model.log_likelihood_tensor, inference.vimco_elbo], {
                    input_data: images,
                    data_mean: np_data_mean
                })
            np_log_x += tmp_np_log_x
            np_l += np.sum(tmp_np_l)
        print(
            'for validation set -- elbo: %.3f\tlog_likelihood: %.3f' %
            (np_l / cfg['data/n_examples'], np_log_x / cfg['data/n_examples']))
Code example #24
    def get_attention_model(self, model, n):
        # Convert `model` into a model that returns the attention weights.
        outs = models.ClassificationModel().attention_block(
            attention_only=True)
        attention_model = models.Model(inputs=outs[0], outputs=outs[1])
        for i in range(len(attention_model.layers)):
            attention_model.layers[i].set_weights(
                model.layers[i * 2 + n].get_weights())
        return attention_model
Code example #25
def test_theta_prime():
    """Testing symbolic expression for firm size differential equation."""
    mod1 = models.Model('negative', workers, firms, valid_F, valid_F_params)
    mod2 = models.Model('positive', workers, firms, valid_F, valid_F_params)

    # theta_prime not implemented in base DifferentiableMatching class
    matching = models.DifferentiableMatching(mod1)
    with nose.tools.assert_raises(NotImplementedError):
        matching.theta_prime

    # y, l, and r should not appear in either mu_prime or theta_prime
    for var in [y, l, r]:
        nose.tools.assert_false(var in mod1.matching.theta_prime.atoms())
        nose.tools.assert_false(var in mod2.matching.theta_prime.atoms())

    # mu and theta should appear in both mu_prime and theta_prime
    nose.tools.assert_true({mu, theta} < mod1.matching.theta_prime.atoms())
    nose.tools.assert_true({mu, theta} < mod2.matching.theta_prime.atoms())
Code example #26
File: batch_process.py Project: blawney/mycalc
def process_batch(df, eqn_file):
    """
    df is a Pandas DataFrame instance.
    eqn_file is a formatted model file

    It should have column names that match the species given in the model file so we 
    can map the dataframe's values to the initial conditions properly.

    Furthermore, the values in the dataframe should all have the same units-- we make no 
    consideration for the relative units here-- that should all be cleared up prior to 
    calling this function.
    """

    factory = reaction_factories.FileReactionFactory(eqn_file)
    model = models.Model(factory)
    solver = model_solvers.ODESolverWJacobian(model)

    # get all the species given in the model file:
    species_set = set()
    for rx in factory.get_reactions():
        species_set = species_set.union(rx.get_all_species())

    # From a general model file we cannot judge which species are necessary for creating
    # a sensible reaction system.  We also would like to allow more columns than just the
    # data we're operating on, so we want to ignore those columns.
    col_set = set(df.columns.tolist())
    if len(col_set.intersection(species_set)) == 0:
        raise BatchCalculationException(
            'The input dataframe did not contain any column headers in common with our reaction system.'
        )

    def process(row, species_set):
        """
        A method for using with apply on the row axis
        sets up the initiial conditions and runs until convergence
        returns a pandas Series with the equilibrium concentrations of all species
        """
        ic = {}
        for s in species_set:
            try:
                ic[s] = row[s]
            except KeyError:
                ic[s] = 0.0  # species absent from the dataframe start at zero concentration

        sample_to_column_mapping, solution, t = solver.equilibrium_solution(
            X0=ic)

        final_vals = solution[-1, :]
        index = [''] * len(final_vals)
        for sample, col_idx in sample_to_column_mapping.items():
            index[col_idx] = sample
        s = pd.Series(final_vals, index=index)
        return s

    results = df.apply(process, args=(species_set, ), axis=1)
    df = pd.concat([df, results], axis=1)
    return df
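
A minimal usage sketch, assuming a model file on disk; the file name 'reactions.eqn' and the species columns 'A' and 'B' are hypothetical and must match the species declared in that file.

import pandas as pd

df = pd.DataFrame({'A': [1.0, 2.0], 'B': [0.5, 0.5]})  # hypothetical species columns
equilibrated = process_batch(df, 'reactions.eqn')       # placeholder path to the model file
print(equilibrated.head())  # input columns plus equilibrium concentrations for every species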
Code example #27
    def __init__(self, model):

        # vocabulary
        self.code_vocab = utils.load_vocab_pk(config.code_vocab_path)
        self.code_vocab_size = len(self.code_vocab)
        self.ast_vocab = utils.load_vocab_pk(config.ast_vocab_path)
        self.ast_vocab_size = len(self.ast_vocab)
        self.nl_vocab = utils.load_vocab_pk(config.nl_vocab_path)
        self.nl_vocab_size = len(self.nl_vocab)

        # dataset
        self.dataset = data.CodePtrDataset(code_path=config.test_code_path,
                                           ast_path=config.test_sbt_path,
                                           nl_path=config.test_nl_path)
        self.dataset_size = len(self.dataset)
        self.dataloader = DataLoader(
            dataset=self.dataset,
            batch_size=config.test_batch_size,
            collate_fn=lambda *args: utils.unsort_collate_fn(
                args,
                code_vocab=self.code_vocab,
                ast_vocab=self.ast_vocab,
                nl_vocab=self.nl_vocab,
                raw_nl=True))

        # model
        if isinstance(model, str):
            self.model = models.Model(code_vocab_size=self.code_vocab_size,
                                      ast_vocab_size=self.ast_vocab_size,
                                      nl_vocab_size=self.nl_vocab_size,
                                      model_file_path=os.path.join(
                                          config.model_dir, model),
                                      is_eval=True)
        elif isinstance(model, dict):
            self.model = models.Model(code_vocab_size=self.code_vocab_size,
                                      ast_vocab_size=self.ast_vocab_size,
                                      nl_vocab_size=self.nl_vocab_size,
                                      model_state_dict=model,
                                      is_eval=True)
        else:
            raise ValueError(
                "Parameter 'model' for class 'Test' must be a file name or a state_dict of the model."
            )
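
A short sketch of the two argument forms the constructor accepts; the file name is hypothetical, and the checkpoint layout in the second form is an assumption.

tester = Test('model_best.pt')  # a file name, resolved under config.model_dir

state = torch.load(os.path.join(config.model_dir, 'model_best.pt'))['state_dict']  # assumed layout
tester = Test(state)            # or an already-loaded state_dict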
Code example #28
def test_wages():
    """Testing symbolic expression for workers wages."""
    mod = models.Model('positive', workers, firms, valid_F, valid_F_params)

    # y, l, and r should not appear in workers wages
    for var in [y, l, r]:
        nose.tools.assert_false(var in mod.matching.wage.atoms())

    # mu and theta should appear in both mu_prime and theta_prime
    nose.tools.assert_true({mu, theta} < mod.matching.wage.atoms())
Code example #29
def test_one_domain(opt):
    # Use specific GPU
    device = torch.device(opt.gpu_num)

    # Dataloader
    test_dataset_file_path = os.path.join('../dataset', opt.target_domain,
                                          'dataset.csv')
    test_loader = get_dataloader(test_dataset_file_path, 'test', opt)

    if opt.target_domain == 'MSP-Improv':
        folder_num = 6
    else:
        folder_num = 5

    test_loader_list = []
    for i in range(folder_num):
        dataset_file_path = os.path.join('../dataset', opt.target_domain,
                                         str(i), 'test.csv')
        loader = get_dataloader(dataset_file_path, 'test', opt)
        test_loader_list.append(loader)

    # Model and loss function
    emotion_recognizer = models.Model(opt)
    emotion_recognizer.to(device)

    criterion = torch.nn.CrossEntropyLoss()

    checkpoint = torch.load(os.path.join('checkpoints', opt.model,
                                         opt.source_domain, 'model.pth.tar'),
                            map_location=device)
    emotion_recognizer.load_state_dict(checkpoint['emotion_recognizer'])

    test_loss, test_acc, test_uar = test(test_loader, emotion_recognizer,
                                         criterion, device)

    acc_list = []
    uar_list = []
    for i in range(folder_num):
        loader = test_loader_list[i]
        _, acc, uar = test(loader, emotion_recognizer, criterion, device)
        acc_list.append(acc)
        uar_list.append(uar)

    acc_list = np.array(acc_list)
    uar_list = np.array(uar_list)

    acc_std = np.std(acc_list)
    uar_std = np.std(uar_list)

    print('epoch: {}'.format(checkpoint['epoch']),
          'test_loss: {0:.5f}'.format(test_loss),
          'test_acc: {0:.3f}'.format(test_acc),
          'test_uar: {0:.3f}'.format(test_uar),
          'acc_std: {0:.3f}'.format(acc_std),
          'uar_std: {0:.3f}'.format(uar_std))
Code example #30
def train(config):
    """Train sigmoid belief net on MNIST."""
    cfg = config
    if cfg['log/clear_dir']:
        util.remove_dir(cfg)

    # data
    data_iterator, np_data_mean, _ = util.provide_data(cfg)
    input_data = tf.placeholder(cfg['dtype'],
                                [cfg['batch_size']] + cfg['data/shape'])
    data_mean = tf.placeholder(cfg['dtype'], cfg['data/shape'])
    input_data_centered = input_data - tf.expand_dims(data_mean, 0)
    tf.image_summary('data', input_data, max_images=cfg['batch_size'])

    def feed_fn():
        _, images = next(data_iterator)
        return {input_data: images, data_mean: np_data_mean}

    model = models.Model(cfg)
    variational = models.Variational(cfg)
    inference = inferences.VariationalInference(cfg, model, variational,
                                                input_data,
                                                input_data_centered)
    model_vars = fw.get_model_variables('model')
    variational_vars = fw.get_model_variables('variational')
    summary_op = tf.merge_all_summaries()
    saver = tf.train.Saver()

    # train
    learn.train(
        graph=tf.get_default_graph(),
        output_dir=cfg['log/dir'],
        train_op=inference.train_op,
        loss_op=inference.vimco_elbo,
        feed_fn=feed_fn,
        supervisor_save_summaries_steps=100,
        log_every_steps=100,
        # max_steps=1)
        max_steps=cfg['optim/n_iterations'])

    # evaluate likelihood on validation set
    with tf.Session() as sess:
        saver.restore(sess, tf.train.latest_checkpoint(cfg['log/dir']))
        np_log_x = 0.
        cfg.update({'data': {'split': 'valid', 'n_examples': 10000}})
        data_iterator, np_data_mean, _ = util.provide_data(cfg)
        # integer division: range() requires an int
        for i in range(cfg['data/n_examples'] // cfg['data/batch_size']):
            _, images = next(data_iterator)
            np_log_x += sess.run(model.log_likelihood_tensor, {
                input_data: images,
                data_mean: np_data_mean
            })
        print('log-likelihood on evaluation set is: ',
              np_log_x / cfg['data/n_examples'])