Code Example #1
File: main.py  Project: ajayrfhp/Bagnet
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("-fit", nargs='?', default=False)
    parser.add_argument("-model", nargs='?', default="simple_model")
    parser.add_argument("-visualize", nargs='?', default=False)
    parser.add_argument("-evaluate", nargs='?')
    parser.add_argument("-dataset")
    parser.add_argument("-model_path", nargs='?')
    parser.add_argument("-visualize_heatmap", nargs='?')

    args = parser.parse_args()
    if args.dataset:
        if args.dataset == 'fashion_mnist':
            train_loader, test_loader = fashion_mnist_dataset.get_data_loaders(
            )
            visualize = fashion_mnist_dataset.visualize_dataset
        elif args.dataset == "dogs_cats":
            train_loader, test_loader = dogs_cats_dataset.get_data_loaders()
            visualize = dogs_cats_dataset.visualize_dataset
    if args.model:
        if args.model == 'simple_model':
            model = SimpleModel()
        if args.model == 'explain_model':
            model = ExplainModel()
    if args.fit:
        fit_classifier(model, train_loader, test_loader, args.model)
    elif args.visualize:
        visualize(train_loader)
    elif args.evaluate and args.model_path:
        model = load_model(args.model_path)
        evaluate(model, test_loader)
    elif args.visualize_heatmap and args.model_path:
        load_visualize_heatmap(args.model_path, test_loader)
Code Example #2
File: executor.py  Project: istalker2/MuranoDsl
    def _evaluate_parameters(self, arguments_scheme, context, this, *args):
        arg_names = list(arguments_scheme.keys())
        parameter_values = {}
        i = 0
        for arg in args:
            value = helpers.evaluate(arg, context)
            if isinstance(value, types.TupleType) and len(value) == 2 and \
                    isinstance(value[0], types.StringTypes):
                name = value[0]
                value = value[1]
                if name not in arguments_scheme:
                    raise TypeError()
            else:
                if i >= len(arg_names):
                    raise TypeError()
                name = arg_names[i]
                i += 1

            if callable(value):
                value = value()
            arg_spec = arguments_scheme[name]
            parameter_values[name] = arg_spec.validate(
                value, this, self._root_context, self._object_store)

        for name, arg_spec in arguments_scheme.iteritems():
            if name not in parameter_values:
                if not arg_spec.has_default:
                    raise TypeError()
                parameter_values[name] = arg_spec.validate(
                    helpers.evaluate(arg_spec.default, context),
                    this, self._root_context, self._object_store)

        return parameter_values
Code Example #3
    def fit(
        self, X_train, Y_train, X_val, Y_val,
        n_epoch=10, n_batch=100, logname='run',
    ):
        """Train the model"""
        alpha = 1.0  # learning rate, which can be adjusted later
        n_data = len(X_train)
        n_superbatch = self.n_superbatch

        for epoch in range(n_epoch):
            # In each epoch, we do a full pass over the training data:
            train_batches, train_err, train_acc = 0, 0, 0
            start_time = time.time()

            # iterate over superbatches to save time on GPU memory transfer
            for X_sb, Y_sb in self.iterate_superbatches(
                X_train, Y_train, n_superbatch,
                datatype='train', shuffle=True,
            ):
                for idx1, idx2 in iterate_minibatch_idx(len(X_sb), n_batch):
                    err, acc = self.train(idx1, idx2, alpha)
                    # collect metrics
                    train_batches += 1
                    train_err += err
                    train_acc += acc
                    if train_batches % 100 == 0:
                        n_total = epoch * n_data + n_batch * train_batches
                        metrics = [
                            n_total,
                            train_err / train_batches,
                            train_acc / train_batches,
                        ]
                        log_metrics(logname, metrics)

            print "Epoch {} of {} took {:.3f}s ({} minibatches)".format(
                epoch + 1, n_epoch, time.time() - start_time, train_batches)

            # make a full pass over the training data and record metrics:
            train_err, train_acc = evaluate(
                self.loss, X_train, Y_train, batchsize=100,
            )
            val_err, val_acc = evaluate(
                self.loss, X_val, Y_val, batchsize=100,
            )

            print "  training:\t\t{:.6f}\t{:.6f}".format(
                train_err, train_acc
            )
            print "  validation:\t\t{:.6f}\t{:.6f}".format(
                val_err, val_acc
            )

            metrics = [epoch, train_err, train_acc, val_err, val_acc]
            log_metrics(logname + '.val', metrics)
Code Example #4
File: app.py  Project: ataylor296/HackGT19
def home_modal():
    if request.method == 'POST':
        data = request.form['url']
        title, score = helpers.evaluate(data, weights)
        print(title, score)
        return render_template("home_modal.html", title = title, url = data, score = score)
    return render_template("home_modal.html", title = "", url = "", score = 0)
Code Example #5
    def __inner_evaluate_model(self, predictions_dict):
        if self.truth_dictionary is None:
            print("[ERROR] No ground truth dictionary defined")
            sys.exit()

        rmse_list = []
        for i in predictions_dict.keys():
            pred = predictions_dict[i]
            truth = self.truth_dictionary[i]

            rmse = evaluate(pred, truth)
            rmse_list.append(rmse)
        return np.mean(rmse_list)
Code Example #6
File: main.py  Project: brucevdkooij/jinni-tools
def jinni_findSuggestionsWithFilters(query):
    logging.info(u'Doing a suggestion search for "{0}"...'.format(query))
    
    url = "http://www.jinni.com/dwr/call/plaincall/AjaxController.findSuggestionsWithFilters.dwr"
    values = {
        # Both the httpSessionId and scriptSessionId need to be submitted
        # or the server will respond with a "HTTP Error 501: Not Implemented".
        # However, they are not validated.
        # FIXME: when logged in for some reason you do need to send along a valid httpSessionId
        "httpSessionId": [cookie.value for cookie in cj if cookie.name == "JSESSIONID"][0],
        "scriptSessionId": "", # i.e. 3C675DDBB02222BE8CB51E2415259E99878
        "callCount": "1",
        "page": "/discovery.html",
        "c0-scriptName": "AjaxController",
        "c0-methodName": "findSuggestionsWithFilters",
        "c0-id": "0",
        "c0-param0": "string:{0}".format(query.encode("utf-8")),
        "c0-e1": "null:null",
        "c0-e2": "boolean:false",
        "c0-e3": "boolean:false",
        "c0-e4": "boolean:false",
        "c0-e5": "Array:[]",
        "c0-param1": "Object_Object:{contentTypeFilter:reference:c0-e1, onlineContentFilter:reference:c0-e2, dvdContentFilter:reference:c0-e3, theaterContentFilter:reference:c0-e4, contentAffiliates:reference:c0-e5}",
        "batchId": "2"
    }
    
    data = urllib.urlencode(values)
    request = urllib2.Request(url, data)
    response = open_url(request)
    content = response.read()
    
    js_tree = parse_js(content)
    tree = convert(js_tree)
    evaluate(js_tree, tree)
    
    results = tree["s1"]
    
    return results
Code Example #7
File: main.py  Project: beaverden/RockPaperScissorsAI
    def start(self) -> None:
        ties = 0
        p1 = 0
        p2 = 0

        for i in range(self.iterations):
            player1_move = self.player1.move()
            player2_move = self.player2.move()
            result = evaluate(player1_move, player2_move)

            if result == 0:
                ties += 1
            elif result == 1:
                p1 += 1
            else:
                p2 += 1
            self.player1.train(player2_move, player1_move, result)
            self.player2.train(player1_move, player2_move, result)
            # print('Player1: %s, Player2: %s, %s' % (player1_move, player2_move, r_str))
            print('Player1: %d, Player2: %d, Ties: %d' % (p1, p2, ties))
Code Example #8
File: murano_object.py  Project: istalker2/MuranoDsl
    def __set_property(self, key, value, caller_class=None):
        if key in self.__type.properties:
            spec = self.__type.get_property(key)
            if caller_class is not None and (
                    spec.type not in typespec.PropertyTypes.Writable or
                    not caller_class.is_compatible(self)):
                raise exceptions.NoWriteAccess(key)

            default = self.__defaults.get(key, spec.default)
            child_context = Context(parent_context=self.__context)
            child_context.set_data(self)
            default = helpers.evaluate(default, child_context, 1)

            self.__properties[key] = spec.validate(
                value, self, self.__context, self.__object_store, default)
        else:
            for parent in self.__parents.values():
                try:
                    parent.__set_property(key, value, caller_class)
                    return
                except AttributeError:
                    continue
            raise AttributeError(key)
Code Example #9
    total += nn
print('Number of parameters:', total)

# print example predictions and ground truths
print('Printing 8 random frame truths, predictions and output vectors...')
test_dl = DataLoader(testset, batch_size=8, shuffle=True)
dataiter = iter(test_dl)
image, label = next(dataiter)

#output = model(image)
#_, prediction = torch.max(output, 1)
#_, label = torch.max(label, 1)
#for t, p, o in zip(label, prediction, output):
#    print('Truth: ' + truth_table[t] + '; prediction: ' + truth_table[p])
#    print('Output: ', o)

print('\nValidating over the entire set...', flush=True)
acc, confusion_matrix = helpers.evaluate(testset,
                                         model,
                                         truth_table,
                                         device=device,
                                         verbose=True)
print('Frame accuracy: ' + str(acc) + '\n')

# If specified, add the suffix to the savepath
if suffix is not None:
    savepath = savepath[:-4] + '_' + suffix + '.png'
helpers.print_confusion_matrix(confusion_matrix, truth_table, savepath)

print('All done. Time:', helpers.time_since(start))
print('\n\n\n')
Code Example #10
    def fit(
        self, X_train, Y_train, X_val, Y_val,
        n_epoch=10, n_batch=100, logname='run',
    ):
        """Train the model"""
        alpha = 1.0  # learning rate, which can be adjusted later
        n_data = len(X_train)
        n_superbatch = self.n_superbatch

        # print '...', self.rbm.logZ_exact()
        # print '...', self.rbm.logZ_exact(marg='h')

        for epoch in range(n_epoch):
            # In each epoch, we do a full pass over the training data:
            train_batches, train_err, train_acc = 0, 0, 0
            start_time = time.time()

            # iterate over superbatches to save time on GPU memory transfer
            for X_sb, Y_sb in self.iterate_superbatches(
                X_train, Y_train, n_superbatch,
                datatype='train', shuffle=True,
            ):
                for idx1, idx2 in iterate_minibatch_idx(len(X_sb), n_batch):
                    # train model
                    self.rbm.reset_averages()
                    err, acc, z = self.train(idx1, idx2, alpha)

                    # train rbm
                    z_rbm = z.reshape((128,1,8,8))
                    # self.rbm.train_batch(z.reshape((128,1,8,8)), np.zeros((n_batch,), dtype='int32'), alpha)

                    # q steps
                    for i in range(2): self.rbm.train_q0(z_rbm)

                    # p steps
                    self.rbm.train_p_batch(z_rbm, alpha)

                    # collect metrics
                    # print err, acc
                    train_batches += 1
                    train_err += err
                    train_acc += acc
                    if train_batches % 100 == 0:
                        n_total = epoch * n_data + n_batch * train_batches
                        metrics = [
                            n_total,
                            train_err / train_batches,
                            train_acc / train_batches,
                        ]
                        log_metrics(logname, metrics)

            print "Epoch {} of {} took {:.3f}s ({} minibatches)".format(
                epoch + 1, n_epoch, time.time() - start_time, train_batches)

            # make a full pass over the training data and record metrics:
            train_err, train_acc = evaluate(
                self.loss, X_train, Y_train, batchsize=128,
            )
            val_err, val_acc = evaluate(
                self.loss, X_val, Y_val, batchsize=128,
            )

            print "  training:\t\t{:.6f}\t{:.6f}".format(
                train_err, train_acc
            )
            print "  validation:\t\t{:.6f}\t{:.6f}".format(
                val_err, val_acc
            )

            metrics = [epoch, train_err, train_acc, val_err, val_acc]
            log_metrics(logname + '.val', metrics)

        # reserve N of training data points to kick start hallucinations
        self.rbm.hallu_set = np.asarray(
            z[:100,:].reshape((100,1,8,8)),
            dtype=theano.config.floatX
        )
Code Example #11
File: macros.py  Project: tsufiev/MuranoDsl
    def execute(self, context, object_store, murano_class):
        raise exceptions.ReturnException(
            helpers.evaluate(self._value, context))
Code Example #12
def heatmap():
    clicked_strike = request.form.get("strike")

    bundles = request.form.get("bundles")
    targetmargin = request.form.get("targetmargin")
    minpayout = request.form.get("minpayout")
    maxpayout = request.form.get("maxpayout")
    kgperperson = int(request.form.get("kgperperson"))
    interest = float(request.form.get("interest"))
    deposit = float(request.form.get("deposit"))

    # region = request.form.get("region")

    res = db.execute(
        f"""SELECT strike, payouts, premiums FROM policies WHERE strike={clicked_strike} AND minpayout={minpayout} AND maxpayout={maxpayout} AND bundles={bundles} AND targetmargin={targetmargin} ORDER BY strike"""
    ).fetchone()

    payouts = pd.DataFrame(res.payouts).transpose()
    payouts.index = [int(x) for x in payouts.index]
    payouts = payouts.sort_index()

    premiums = pd.DataFrame(res.premiums).transpose()
    premiums.index = [int(x) for x in premiums.index]
    premiums = premiums.sort_index()

    # Calculate critical critloss DF and total_thresh
    crit_thresh_df = helpers.calc_crit(data,
                                       indexyields,
                                       loandeposit=deposit,
                                       loaninterest=interest,
                                       kg_per_mouth=kgperperson)

    cl_noins, cl_ins, clsr, clcr, clfr, rmsr, premsaspc, realisedmargin = helpers.evaluate(
        realyields,
        crit_thresh_df,
        payouts,
        premiums,
        data['farmarea'],
        startyear=startyear)

    # min_cl = cl_noins.min().min()
    payouts = payouts.loc[:, startyear:].sort_index(ascending=False)
    premiums = premiums.loc[:, startyear:].sort_index(ascending=False)
    crit_thresh_df = crit_thresh_df.loc[:,
                                        startyear:].sort_index(ascending=False)
    cl_ins = cl_ins.sort_index(ascending=False)
    cl_noins = cl_noins.sort_index(ascending=False)
    indexyieldstosend = indexyields.loc[:,
                                        startyear:].sort_index(ascending=False)
    realyieldstosend = realyields.loc[:,
                                      startyear:].sort_index(ascending=False)

    # if region != "All":
    #     cl_noins = cl_noins[:][data['group'] == region]
    #     cl_ins = cl_ins[:][data['group'] == region]
    #     indexyieldstosend = indexyieldstosend[:][data['group'] == region]
    #     regions = [region] * cl_ins.shape[0]
    # else:
    regions = data['group'].sort_index(ascending=False).tolist()

    improvement = (cl_ins - cl_noins)

    sitenames = [f"Farm {x}" for x in indexyieldstosend.index]

    heatmapdata = {
        "payouts": payouts.values.tolist(),
        "premiums": premiums.values.tolist(),
        "crit_thresh_df": crit_thresh_df.values.tolist(),
        "cl_noins": cl_noins.values.tolist(),
        "cl_ins": cl_ins.values.tolist(),
        "improvement": improvement.values.tolist(),
        "columns": cl_ins.columns.tolist(),
        "sitenames": sitenames,
        "regions": regions,
        # "min_cl": min_cl,
        "indexyields": indexyieldstosend.values.tolist(),
        "realyields": realyieldstosend.values.tolist()
    }

    return jsonify(heatmapdata)
Code Example #13
import json

import pyrebase

import helpers

with open('config.json') as config_file:
    config = json.load(config_file)

firebase = pyrebase.initialize_app(config)

db = firebase.database()

weights = db.child("sources").get().val()

test_news = [
    "https://www.nydailynews.com/news/politics/ny-trump-first-pitch-washington-nationals-world-series-20191026-o5rnjkq3trhfnpenpvah4fadtm-story.html?fbclid=IwAR1Bz7xj629rTFw8p5AaKLYSrG6SvLaAZwBNC4vTdMJ0vCyVDzawh7rCnG0",
    "https://www.mercurynews.com/2019/10/26/pge-shutoffs-grow-nearly-1-million-customers-to-lose-power/",
    "https://www.aljazeera.com/news/2019/10/russia-decries-decision-secure-oil-fields-eastern-syria-191026183322358.html",
    "https://www.cnn.com/2019/10/26/politics/company-government-contract-donald-trump-brother-robert-trump/index.html",
    "https://abcnews.go.com/Politics/state-department-official-expected-part-impeachment-probe/story?id=66532626&cid=clicksource_4380645_null_hero_hed",
    #"https://www.usnews.com/news/elections/articles/2019-10-25/the-state-of-the-presidential-race-100-days-until-the-iowa-caucus",
    #"https://www.npr.org/2019/10/26/773706177/pentagon-awards-10-billion-contract-to-microsoft-over-front-runner-amazon",
    #"https://www.nytimes.com/2019/10/26/us/tennessee-treehouse-fire.html",
    #"https://www.huffpost.com/entry/al-franken-comeback-politicon_n_5db2300be4b0a89374020d55?guccounter=1&guce_referrer=aHR0cHM6Ly93d3cuaHVmZnBvc3QuY29tL25ld3Mv&guce_referrer_sig=AQAAAK1EbjNMIbjvCJBNwOo5wC5hxouNhFg7KktUGaVrm_Qbkmi3iri2a7zDnQNG0ra9g_nGJ9jLrKljUlj5FqDZ-FL11ENuKe14fDZvkekFXJbeBU7TvwYfeWxNivPrt7wApwGqSTwlOjRmq7s1SQ16mj05pPdVdW7ZJTRQSOKGZ5Gs",
    "https://fox2now.com/2019/10/26/belleville-apartment-fire-caused-by-unattended-candle/"
]

for s in test_news:
    title, score = helpers.evaluate(s, weights)
    if title:
        print("Title: ", title)
    else:
        print("Title: Unknown")
    print(score)
Code Example #14
File: main.py  Project: rlzh/tsr-mscnn-elm
    training_operation = optimizer.minimize(loss_operation)

    # model evaluation
    correct_prediction = tf.equal(tf.argmax(logits, 1),
                                  tf.argmax(one_hot_y, 1))

    accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction,
                                                tf.float32))
    saver = tf.train.Saver()

    # training
    max_accuracy = 0
    with tf.Session() as sess:
        saver.restore(sess, './best_model_save_file')
        validation_accuracy = helpers.evaluate(X_test, y_test,
                                               accuracy_operation, n_test, x,
                                               y, keep_prob)
        print("validation accuracy = {:.3f}".format(validation_accuracy))

        # sess.run(tf.global_variables_initializer())
        # num_examples = X_train.shape[0]

        # print("Training with batch size ", BATCH_SIZE)
        # print()
        # for i in range(EPOCHS):
        #     X_train, y_train = skl.utils.shuffle(X_train, y_train)
        #     # process each batch
        #     for offset in range(0, num_examples, BATCH_SIZE):
        #         end = offset + BATCH_SIZE
        #         batch_x, batch_y = X_train[offset:end], y_train[offset:end]
        #         _, loss = sess.run([training_operation, loss_operation], feed_dict={
Code Example #15
def train(x_train, y_train, x_val, y_val, params):
    ''' TODO: documentation '''

    # Parameters String used for saving the files
    parameters_str = str('_d' + str(params['do']).replace('.', '') + '_a' +
                         str(params['a']).replace('.', '') + '_k' +
                         str(params['k']).replace('.', '') + '_c' +
                         str(params['cl']).replace('.', '') + '_s' +
                         str(params['s']).replace('.', '') + '_pf' +
                         str(params['pf']).replace('.', '') + '_pt' +
                         params['pt'] + '_fp' +
                         str(params['fp']).replace('.', '') + '_opt' +
                         params['opt'] + '_obj' + params['obj'])

    # Printing the parameters of the model
    print('[Dropout Param] \t->\t' + str(params['do']))
    print('[Alpha Param] \t\t->\t' + str(params['a']))
    print('[Multiplier] \t\t->\t' + str(params['k']))
    print('[Patience] \t\t->\t' + str(params['patience']))
    print('[Tolerance] \t\t->\t' + str(params['tolerance']))
    print('[Input Scale Factor] \t->\t' + str(params['s']))
    print('[Pooling Type] \t\t->\t' + params['pt'])
    print('[Pooling Factor] \t->\t' + str(str(params['pf'] * 100) + '%'))
    print('[Feature Maps Policy] \t->\t' + params['fp'])
    print('[Optimizer] \t\t->\t' + params['opt'])
    print('[Objective] \t\t->\t' + get_Obj(params['obj']))
    print('[Results filename] \t->\t' +
          str(params['res_alias'] + parameters_str + '.txt'))

    # Rescale Input Images
    if params['s'] != 1:
        print('\033[93m' + 'Rescaling Patches...' + '\033[0m')
        x_train = np.asarray(
            np.expand_dims([
                cv2.resize(x_train[i, 0, :, :], (0, 0),
                           fx=params['s'],
                           fy=params['s']) for i in xrange(x_train.shape[0])
            ], 1))
        x_val = np.asarray(
            np.expand_dims([
                cv2.resize(
                    x_val[i, 0, :, :], (0, 0), fx=params['s'], fy=params['s'])
                for i in xrange(x_val.shape[0])
            ], 1))
        print('\033[92m' + 'Done, Rescaling Patches' + '\033[0m')
        print('[New Data Shape]\t->\tX: ' + str(x_train.shape))

    model = get_model(x_train.shape, y_train.shape, params)

    # Counters-buffers
    maxf = 0
    maxacc = 0
    maxit = 0
    maxtrainloss = 0
    maxvaloss = np.inf
    p = 0
    it = 0
    best_model = model

    # Open file to write the results
    open(params['res_alias'] + parameters_str + '.csv',
         'a').write('Epoch, Val_fscore, Val_acc, Train_loss, Val_loss\n')
    open(params['res_alias'] + parameters_str + '-Best.csv',
         'a').write('Epoch, Val_fscore, Val_acc, Train_loss, Val_loss\n')

    while p < params['patience']:
        p += 1

        # Fit the model for one epoch
        print('Epoch: ' + str(it))
        history = model.fit(x_train,
                            y_train,
                            batch_size=128,
                            nb_epoch=1,
                            validation_data=(x_val, y_val),
                            shuffle=True)

        # Evaluate models
        y_score = model.predict(x_val, batch_size=1050)
        fscore, acc, cm = H.evaluate(np.argmax(y_val, axis=1),
                                     np.argmax(y_score, axis=1))
        print('Val F-score: ' + str(fscore) + '\tVal acc: ' + str(acc))

        # Write results in file
        open(params['res_alias'] + parameters_str + '.csv', 'a').write(
            str(
                str(it) + ', ' + str(fscore) + ', ' + str(acc) + ', ' +
                str(np.max(history.history['loss'])) + ', ' +
                str(np.max(history.history['val_loss'])) + '\n'))

        # check if current state of the model is the best and write evaluation metrics to file
        if fscore > maxf * params[
                'tolerance']:  # if fscore > maxf*params['tolerance']:
            p = 0  # restore patience counter
            best_model = model  # store current model state
            maxf = fscore
            maxacc = acc
            maxit = it
            maxtrainloss = np.max(history.history['loss'])
            maxvaloss = np.max(history.history['val_loss'])

            print(np.round(100 * cm / np.sum(cm, axis=1).astype(float)))
            open(params['res_alias'] + parameters_str + '-Best.csv',
                 'a').write(
                     str(
                         str(maxit) + ', ' + str(maxf) + ', ' + str(maxacc) +
                         ', ' + str(maxtrainloss) + ', ' + str(maxvaloss) +
                         '\n'))

        it += 1

    print('Max: fscore:', maxf, 'acc:', maxacc, 'epoch: ', maxit,
          'train loss: ', maxtrainloss, 'validation loss: ', maxvaloss)

    return best_model
Code Example #16
    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(one_hot_y, 1))

    accuracy_operation = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    saver = tf.train.Saver()

    # training
    max_accuracy = 0
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        num_examples = X_train.shape[0]

        print("Training with batch size ", BATCH_SIZE)
        print()
        for i in range(EPOCHS):
            X_train, y_train = skl.utils.shuffle(X_train, y_train)
            # process each batch
            for offset in range(0, num_examples, BATCH_SIZE):
                end = offset + BATCH_SIZE
                batch_x, batch_y = X_train[offset:end], y_train[offset:end]
                _, loss = sess.run([training_operation, loss_operation], feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})  # execute session
            validation_accuracy = helpers.evaluate(X_valid, y_valid, accuracy_operation, BATCH_SIZE, x, y, keep_prob)
            print("EPOCH {} ...".format(i + 1), " validation accuracy = {:.3f}".format(validation_accuracy))
            if validation_accuracy > max_accuracy:  # save only highest accuracy we've achieved so far
                max_accuracy = validation_accuracy
                saver.save(sess, './best_model_save_file')
                print("Highest accuracy seen so far. Model saved.")
            else:
                print("Not highest accuracy seen so far. Model not saved.")
            print()

Code Example #17
File: expressions.py  Project: istalker2/MuranoDsl
    def execute(self, context, murano_class):
        result = helpers.evaluate(self.expression, context)
        if self.destination:
            self.destination(result, context, murano_class)

        return result
Code Example #18
File: cnn_model.py  Project: fobikr/ild-cnn
def train(x_train, y_train, x_val, y_val, params):
    ''' TODO: documentation '''

    
    # Parameters String used for saving the files
    parameters_str = str('_d' + str(params['do']).replace('.', '') +
                         '_a' + str(params['a']).replace('.', '') + 
                         '_k' + str(params['k']).replace('.', '') + 
                         '_c' + str(params['cl']).replace('.', '') + 
                         '_s' + str(params['s']).replace('.', '') + 
                         '_pf' + str(params['pf']).replace('.', '') + 
                         '_pt' + params['pt'] +
                         '_fp' + str(params['fp']).replace('.', '') +
                         '_opt' + params['opt'] +
                         '_obj' + params['obj'])

    # Printing the parameters of the model
    print('[Dropout Param] \t->\t'+str(params['do']))
    print('[Alpha Param] \t\t->\t'+str(params['a']))
    print('[Multiplier] \t\t->\t'+str(params['k']))
    print('[Patience] \t\t->\t'+str(params['patience']))
    print('[Tolerance] \t\t->\t'+str(params['tolerance']))
    print('[Input Scale Factor] \t->\t'+str(params['s']))
    print('[Pooling Type] \t\t->\t'+ params['pt'])
    print('[Pooling Factor] \t->\t'+str(str(params['pf']*100)+'%'))
    print('[Feature Maps Policy] \t->\t'+ params['fp'])
    print('[Optimizer] \t\t->\t'+ params['opt'])
    print('[Objective] \t\t->\t'+ get_Obj(params['obj']))
    print('[Results filename] \t->\t'+str(params['res_alias']+parameters_str+'.txt'))

    # Rescale Input Images
    if params['s'] != 1:
        print('\033[93m'+'Rescaling Patches...'+'\033[0m')
        x_train = np.asarray(np.expand_dims([cv2.resize(x_train[i, 0, :, :], (0,0), fx=params['s'], fy=params['s']) for i in xrange(x_train.shape[0])], 1))
        x_val = np.asarray(np.expand_dims([cv2.resize(x_val[i, 0, :, :], (0,0), fx=params['s'], fy=params['s']) for i in xrange(x_val.shape[0])], 1))
        print('\033[92m'+'Done, Rescaling Patches'+'\033[0m')
        print('[New Data Shape]\t->\tX: '+str(x_train.shape))

    model = get_model(x_train.shape, y_train.shape, params)

    # Counters-buffers
    maxf         = 0
    maxacc       = 0
    maxit        = 0
    maxtrainloss = 0
    maxvaloss    = np.inf
    p            = 0
    it           = 0
    best_model   = model

    # Open file to write the results
    open(params['res_alias']+parameters_str+'.csv', 'a').write('Epoch, Val_fscore, Val_acc, Train_loss, Val_loss\n')
    open(params['res_alias']+parameters_str+'-Best.csv', 'a').write('Epoch, Val_fscore, Val_acc, Train_loss, Val_loss\n')
    
    while p < params['patience']:
        p += 1

        # Fit the model for one epoch
        print('Epoch: ' + str(it))
        history = model.fit(x_train, y_train, batch_size=128, nb_epoch=1, validation_data=(x_val,y_val), shuffle=True)

        # Evaluate models
        y_score = model.predict(x_val, batch_size=1050)
        fscore, acc, cm = H.evaluate(np.argmax(y_val, axis=1), np.argmax(y_score, axis=1))
        print('Val F-score: '+str(fscore)+'\tVal acc: '+str(acc))

        # Write results in file
        open(params['res_alias']+parameters_str+'.csv', 'a').write(str(str(it)+', '+str(fscore)+', '+str(acc)+', '+str(np.max(history.history['loss']))+', '+str(np.max(history.history['val_loss']))+'\n'))

        # check if current state of the model is the best and write evaluation metrics to file
        if fscore > maxf*params['tolerance']:  # if fscore > maxf*params['tolerance']:
            p            = 0  # restore patience counter
            best_model   = model  # store current model state
            maxf         = fscore 
            maxacc       = acc
            maxit        = it
            maxtrainloss = np.max(history.history['loss'])
            maxvaloss    = np.max(history.history['val_loss'])

            print(np.round(100*cm/np.sum(cm,axis=1).astype(float)))
            open(params['res_alias']+parameters_str+'-Best.csv', 'a').write(str(str(maxit)+', '+str(maxf)+', '+str(maxacc)+', '+str(maxtrainloss)+', '+str(maxvaloss)+'\n'))

        it += 1

    print('Max: fscore:', maxf, 'acc:', maxacc, 'epoch: ', maxit, 'train loss: ', maxtrainloss, 'validation loss: ', maxvaloss)

    return best_model
Code Example #19
File: deap_1a.py  Project: lbkelly/j_acezero
    def run_algorithm(self, seed):
        # Set up custom hall of fame to keep track of the top chromosomes and their generations
        hall_of_fame = hof.HallOfFame(self.HOF_SIZE)
        # set up standard hall of fame that comes with DEAP
        hall_of_fame_with_dupes = tools.HallOfFame(self.HOF_SIZE)
        # Get date and time
        date_time = datetime.datetime.now().strftime("%m-%d_%I%M%p")

        # print out to file
        file_name = date_time + '.txt'
        sys.stdout = open(file_name, 'w')

        print 'Seed:', seed

        i = 0
        while i < self.GENERATIONS and not self.isConverged:
            i += 1
            print('--------------------------------' + 'Generation: ' +
                  str(i) + '-----------------------------------')
            # evaluate each chromosome in the population and assign its fitness score
            for index, x in enumerate(self.population):
                # update the chromosome, write out to JSON tactical file
                chromosome_parameters.update_chromosome(
                    x[0].value, x[1].value, x[2].value, x[3].value, x[4].value)
                # use Ace0 to evaluate the chromosome
                x.fitness.values = helper.evaluate(self.repetitions)

            # Select the best chromosome from this generation and display it
            best_chromosome = tools.selBest(self.population, 1)[0]
            print "Best chromosome is: ", helper.list_to_string(
                best_chromosome), best_chromosome.fitness.values

            # Select worst chromosome and display
            worst_chromosome = tools.selWorst(self.population, 1)[0]
            print "Worst chromosome is: ", helper.list_to_string(
                worst_chromosome), worst_chromosome.fitness.values

            # Get the over all fitness values
            sum_fits = sum(ind.fitness.values[0] for ind in self.population)
            average_fitness = sum_fits / self.POP
            print 'Generation average fitness: ', average_fitness

            # save best and average fitness to plot lists
            self.plot_best_fitness.append(best_chromosome.fitness.values)
            self.plot_average_fitness.append(average_fitness)
            self.plot_worst_fitness.append(worst_chromosome.fitness.values)

            # Update the hall of fame to track the best chromosomes from each generation
            hall_of_fame.update(self.population, i)
            hall_of_fame_with_dupes.update(self.population)

            # hall_of_fame.print_hof()

            # this is where we evolve the population
            # Select the next generation individuals
            offspring = self.toolbox.select(self.population,
                                            len(self.population))
            # Clone the selected individuals so we can apply crossover
            offspring = list(map(self.toolbox.clone, offspring))

            # Apply crossover on the offspring
            for child1, child2 in zip(offspring[::2], offspring[::-2]):
                if random.random() < self.CROSSOVER_PROBABILITY:
                    # mate the two children
                    self.toolbox.mate(child1, child2)

            # Apply mutation on the offspring
            for mutant in offspring:
                if random.random() < self.MUTATION_PROBABILITY:
                    # print 'Mutated Chromosome before: ', helper.list_to_string(mutant)
                    for index, x in enumerate(mutant):
                        mutant[index].value = helper.convert_range(
                            mutant[index].value, mutant[index].min,
                            mutant[index].max)

                    self.toolbox.mutate(mutant)

                    for index, x in enumerate(mutant):
                        mutant[index].value = helper.change_back(
                            mutant[index].value, mutant[index].min,
                            mutant[index].max)
                        helper.bounds_check(mutant[index])

                    # print 'Mutated Chromosome after: ', helper.list_to_string(mutant)

            # The population is entirely replaced by the offspring
            self.population[:] = offspring

            if float(best_chromosome.fitness.values[0]
                     ) - average_fitness < 0.0001:
                self.converge_tracker += 1
                if self.converge_tracker >= self.converge_tracker_max:
                    print 'CONVERGED'
                    self.isConverged = True
            else:
                self.converge_tracker = 0

            # # Elitism
            self.population[0] = hall_of_fame_with_dupes[0]

        print(
            '-------------------------------------Hall Of Fame Regular----------------------------------------'
        )
        for chromosomes in hall_of_fame_with_dupes:
            print 'Chromosome: ', helper.list_to_string(
                chromosomes), 'Fitness: ', chromosomes.fitness

        print(
            '-------------------------------------Hall Of Fame with Gen----------------------------------------'
        )
        hall_of_fame.print_hof()

        print(
            '-------------------------------------Stats----------------------------------------'
        )
        print("Pop size: " + str(self.POP))
        print("Generations: " + str(self.GENERATIONS))
        print("Crossover Prob: " + str(self.CROSSOVER_PROBABILITY))
        print("Mutation Prob: " + str(self.MUTATION_PROBABILITY))

        # Select the best chromosome from this generation and display it
        best_chromosome = tools.selBest(self.population, 1)[0]
        print "Best chromosome is: ", helper.list_to_string(
            best_chromosome), best_chromosome.fitness.values

        # Select worst chromosome and display
        worst_chromosome = tools.selWorst(self.population, 1)[0]
        print "Worst chromosome is: ", helper.list_to_string(
            worst_chromosome), worst_chromosome.fitness.values

        # Get the over all fitness values
        sum_fits = sum(ind.fitness.values[0] for ind in self.population)
        average_fitness = sum_fits / self.POP
        print 'Generation average fitness: ', average_fitness

        title = 'Seed: ' + str(seed)

        visualization.draw_plot(title, self.plot_average_fitness,
                                self.plot_best_fitness,
                                self.plot_worst_fitness,
                                'average per generation', 'best fitness',
                                'worst fitness', 'generation', 'fitness', 1,
                                250, 150, date_time)

        del creator.fitness
        del creator.Tactic
Code Example #20
def updategraphs():

    # Query for form data
    bundles = request.form.get("bundles")
    targetmargin = request.form.get("targetmargin")
    minpayout = request.form.get("minpayout")
    maxpayout = request.form.get("maxpayout")
    kgperperson = int(request.form.get("kgperperson"))
    interest = float(request.form.get("interest"))
    deposit = float(request.form.get("deposit"))

    strikes = np.linspace(0, 6000, 13)
    policyid_list = []
    payouts_list = []
    premiums_list = []
    crit_thresh_list = []
    clsr_list = []
    premsaspc_list = []
    realisedmargin_list = []

    for strike in strikes:
        strike = int(strike)
        # Access pre-calculated payout and premium data from the database
        # (calculation of these values is too computationally intensive to run in the app)
        res = db.execute(
            f"""SELECT policyid, strike, payouts, premiums FROM policies WHERE strike={strike} AND minpayout={minpayout} AND maxpayout={maxpayout} AND bundles={bundles} AND targetmargin={targetmargin} ORDER BY strike"""
        ).fetchone()

        policyid_list.append(res.policyid)

        payouts = pd.DataFrame(res.payouts).transpose()
        payouts.index = [int(x) for x in payouts.index]
        payouts = payouts.sort_index()

        premiums = pd.DataFrame(res.premiums).transpose()
        premiums.index = [int(x) for x in premiums.index]
        premiums = premiums.sort_index()

        # Calculate critical critloss DF and total_thresh
        crit_thresh_df = helpers.calc_crit(data,
                                           indexyields,
                                           loandeposit=deposit,
                                           loaninterest=interest,
                                           kg_per_mouth=kgperperson)

        cl_noins, cl_ins, clsr, clcr, clfr, rmsr, premsaspc, realisedmargin = helpers.evaluate(
            realyields,
            crit_thresh_df,
            payouts,
            premiums,
            data['farmarea'],
            startyear=startyear)

        clsr_list.append(clsr)
        premsaspc_list.append(premsaspc)
        realisedmargin_list.append(realisedmargin)

    datadict = {
        "strikes": strikes.tolist(),
        "clsr_list": clsr_list,
        "premsaspc_list": premsaspc_list,
        "policyid_list": policyid_list,
        "realised_margin": realisedmargin_list
    }

    return jsonify(datadict)
Code Example #21
def main():
    # Import data from csv
    data = pd.read_csv('data/data123.csv', header=0, index_col='siteid')

    # Pull out index yields and real yields into their own dataframes
    indcols = []
    realcols = []
    for col in data.columns:
        if col[:5] == "index":
            indcols.append(col)
        elif col[:4] == 'real':
            realcols.append(col)

    indexyields = data[indcols]
    realyields = data[realcols]

    # Create array of years and relabel the columns of indexyields and realyields
    years = [x[-4:] for x in indexyields.columns]
    indexyields.columns = years
    realyields.columns = years

    # --------------------------

    # Analyse combinations of insurance parameters at default crit loss
    # parameters and write results to the POLICIES table of the database

    # strike = 4000
    # bundle = 2
    maxpayout = 999999
    # targetmargin = 0.25

    strikes = np.linspace(0, 6000, 13)
    bundles = [0, 1, 2, 3]
    minpayouts = [0, 500, 1000, 2000]
    targetmargins = [0, 0.1, 0.2, 0.3, 0.4, 0.5]

    loandeposits = [0, 0.5]  # [0, 0.2, 0.5, 1.0]
    loaninterests = [0.1, 0.2]  # [0.1, 0.2, 0.3]
    kg_per_mouths = [90, 180]  # [90, 180, 360]

    combinations = (len(strikes) * len(bundles) * len(minpayouts) *
                    len(targetmargins))
    counter = 1

    for strike in strikes:
        for bundle in bundles:
            for minpayout in minpayouts:
                for targetmargin in targetmargins:

                    payouts, premiums = helpers.calc_payouts_premiums(
                        indexyields,
                        strikelevel=strike,
                        bundles=bundle,
                        minpayout=minpayout,
                        maxpayout=maxpayout,
                        targetmargin=targetmargin)

                    crit_thresh_df = helpers.calc_crit(data,
                                                       indexyields,
                                                       loandeposit=0.20,
                                                       loaninterest=0.19,
                                                       kg_per_mouth=180)

                    cl_noins, cl_ins, clsr, clcr, clfr, premsaspc, realisedmargin = helpers.evaluate(
                        realyields,
                        crit_thresh_df,
                        payouts,
                        premiums,
                        data['farmarea'],
                        startyear='1990')

                    payouts_json = payouts.to_json(orient='index')
                    premiums_json = premiums.to_json(orient='index')

                    db.execute(
                        """INSERT INTO policies(strike, bundles, minpayout, maxpayout, targetmargin, realisedmargin, clsr, clcr, clfr, premsaspc, payouts, premiums)
                               VALUES (:strike, :bundle, :minpayout, :maxpayout, :targetmargin, :realisedmargin, :clsr, :clcr, :clfr, :premsaspc, :payouts, :premiums);""",
                        {
                            "strike": strike,
                            "bundle": bundle,
                            "minpayout": minpayout,
                            "maxpayout": maxpayout,
                            "targetmargin": targetmargin,
                            "realisedmargin": realisedmargin,
                            "clsr": clsr,
                            "clcr": clcr,
                            "clfr": clfr,
                            "premsaspc": premsaspc,
                            "payouts": payouts_json,
                            "premiums": premiums_json
                        })

                    print(f"Added {counter} of {combinations}")
                    counter += 1

    # Isolate data for the FARMS table
    for ind, row in data.iterrows():
        siteid = ind
        farmarea = row['farmarea']
        hhsize = row['householdsize']
        region = row['group']
        indexyield = indexyields.iloc[ind, :].to_json()
        realyield = realyields.iloc[ind, :].to_json()

        # Write data to FARMS table
        db.execute(
            """INSERT INTO farms (siteid, farmarea, hhsize, region, indexyields, realyields)
                   VALUES (:siteid, :farmarea, :hhsize, :region, :indexyield, :realyield);""",
            {
                'siteid': siteid,
                'farmarea': farmarea,
                'hhsize': hhsize,
                'region': region,
                'indexyield': indexyield,
                'realyield': realyield
            })

    db.commit()