Example #1
def doEvaluation(configFile):
    """
    Do the track model evaluation processing, using :mod:`Evaluate`.

    To perform this stage, it is recommended to generate an event set
    with several simulated years (e.g. 10, 20, 30 years) in each
    simulation.

    A good setting might be::

        [Actions]
        ExecuteTrackGenerator=True
        ExecuteEvaluate=True

        [TrackGenerator]
        NumSimulations=1000
        YearsPerSimulation=50

    This will generate 1000 simulations each with 50 years of simulated
    TC activity. :mod:`Evaluate` will then compare pressure distributions,
    track density, landfall rates and longitude crossing rates for the
    input dataset and the full 1000 simulations.

    :param str configFile: Name of the configuration file.

    """

    log.info("Running Evaluation")

    import Evaluate
    Evaluate.run(configFile)
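
For reference, a minimal driver sketch for the function above (the config file name is a placeholder, and the module-level `log` used above is assumed to be configured by the surrounding tcrm.py module):

# Usage sketch only; "evaluate.ini" is a hypothetical file following the
# [Actions]/[TrackGenerator] layout shown in the docstring above.
import logging

logging.basicConfig(level=logging.INFO)
log = logging.getLogger(__name__)

if __name__ == "__main__":
    doEvaluation("evaluate.ini")
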
Example #2
File: tcrm.py Project: jmettes/tcrm
def doEvaluation(configFile):
    """
    Do the track model evaluation processing, using :mod:`Evaluate`.

    To perform this stage, it is recommended to generate an event set
    with several simulated years (e.g. 10, 20, 30 years) in each
    simulation.

    A good setting might be::

        [Actions]
        ExecuteTrackGenerator=True
        ExecuteEvaluate=True
        
        [TrackGenerator]
        NumSimulations=1000
        YearsPerSimulation=50

    This will generate 1000 simulations each with 50 years of simulated
    TC activity. :mod:`Evaluate` will then compare pressure distributions,
    track density, landfall rates and longitude crossing rates for the
    input dataset and the full 1000 simulations.
    
    :param str configFile: Name of the configuration file.

    """

    log.info("Running Evaluation")

    import Evaluate
    Evaluate.run(configFile)
Example #3
    def Evaluate(self, sess, val_batches, score_file_path):
        labels = []
        self.all_candidate_scores = []
        val_batch_num = len(val_batches["response"])

        # eva_score_file = open(score_file_path, 'w')
        for batch_index in xrange(val_batch_num):
            feed_dict = {
                self.utterance_ph:
                np.array(val_batches["turns"][batch_index]),
                self.all_utterance_len_ph:
                np.array(val_batches["every_turn_len"][batch_index]),
                self.response_ph:
                np.array(val_batches["response"][batch_index]),
                self.response_len:
                np.array(val_batches["response_len"][batch_index]),
                self.y_true:
                np.array(val_batches["label"][batch_index])
            }
            val_loss = sess.run(self.total_loss, feed_dict=feed_dict)
            #  print('val_loss',val_loss)
            candidate_scores = sess.run(self.y_pred, feed_dict=feed_dict)
            self.all_candidate_scores.append(candidate_scores[:, 1])

            labels.extend(val_batches["label"][batch_index])
        #  for i in xrange(len(val_batches["label"][batch_index])):
        #  eva_score_file.write(str(candidate_scores[i]) +'\t'+str(val_batches["label"][batch_index][i])+ '\n')

    # eva_score_file.close()
        all_candidate_scores = np.concatenate(self.all_candidate_scores,
                                              axis=0)
        Evaluate.ComputeR10_1(all_candidate_scores, labels)
        Evaluate.ComputeR10_2(all_candidate_scores, labels)
        Evaluate.ComputeR10_5(all_candidate_scores, labels)
        Evaluate.ComputeR2_1(all_candidate_scores, labels)
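
Evaluate.ComputeR10_1 / ComputeR10_2 / ComputeR10_5 / ComputeR2_1 are not part of this listing. They are the usual R_n@k response-selection metrics: the flat score/label arrays are split into consecutive groups of n candidates per context, and the metric is the fraction of groups whose relevant response is ranked within the top k. A minimal sketch under that assumption (the project's own implementation may differ in detail):

import numpy as np

def compute_recall_at_k(scores, labels, group_size=10, k=1):
    """R_n@k: fraction of candidate groups whose relevant response is in the top k."""
    scores = np.asarray(scores, dtype=float)
    labels = np.asarray(labels)
    hits = total = 0
    for start in range(0, len(labels), group_size):
        group_scores = scores[start:start + group_size]
        group_labels = labels[start:start + group_size]
        top_k = np.argsort(-group_scores)[:k]   # indices of the k best-scored candidates
        hits += int(group_labels[top_k].sum() > 0)
        total += 1
    return hits / total
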
Example #4
    def TestModel(self, conf):

        if not os.path.exists(conf['save_path']):
            os.makedirs(conf['save_path'])
        print('begin test: start loading data')
        print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))
        train_data, val_data, test_data = pickle.load(
            open(conf["data_path"], 'rb'))
        print('finish loading data')

        test_batches = reader.build_batches(test_data, conf)

        print("finish building test batches")
        print(time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time())))

        # refine conf
        test_batch_num = len(test_batches["response"])

        with tf.Session() as sess:
            saver = tf.train.Saver()
            # with tf.Session() as sess:
            # sess.run(init)
            saver.restore(sess, os.path.join(conf["init_model"], "model.2"))
            print("sucess init %s" % conf["init_model"])

            score_file_path = conf['save_path'] + 'score.test'
            score_file = open(score_file_path, 'w')
            all_candidate_score = []
            labels = []
            for batch_index in xrange(test_batch_num):
                # print('utterance_ph',np.array(test_batches["turns"][batch_index]).shape)
                feed_dict = {
                    self.utterance_ph:
                    np.array(test_batches["turns"][batch_index]),
                    # _model.tt_turns_len: test_batches["tt_turns_len"][batch_index],
                    self.all_utterance_len_ph:
                    np.array(test_batches["every_turn_len"][batch_index]),
                    self.response_ph:
                    np.array(test_batches["response"][batch_index]),
                    self.response_len:
                    np.array(test_batches["response_len"][batch_index]),
                    # _model.label: test_batches["label"][batch_index]
                }
                # last_hidden = sess.run(self.last_hidden, feed_dict=feed_dict)
                #  print('last_hidden', last_hidden.shape)
                candidate_scores = sess.run(self.y_pred, feed_dict=feed_dict)
                all_candidate_score.append(candidate_scores[:, 1])
                # scores = sess.run(_model.logits, feed_dict=feed)

                for i in xrange(conf["batch_size"]):
                    score_file.write(
                        str(candidate_scores[i]) + '\t' +
                        str(test_batches["label"][batch_index][i]) + '\n')
                    labels.append(test_batches["label"][batch_index][i])
            score_file.close()

            all_candidate_scores = np.concatenate(all_candidate_score, axis=0)
            Evaluate.ComputeR10_1(all_candidate_scores, labels)
            Evaluate.ComputeR2_1(all_candidate_scores, labels)
Example #5
def main(cfg, load_model, musdb_path, output_path):
    model_config = cfg["model_config"]
    Evaluate.produce_musdb_source_estimates(model_config, load_model, musdb_path, output_path, subsets='test')
Example #6
    def Evaluate(self, sess):
        with open(ev_path, 'rb') as f:
            history, true_utt, labels = pickle.load(f)
        with open(ev_char_path, 'rb') as f:
            utt_char, true_ch_utt, ch_labels = pickle.load(f)
        self.all_candidate_scores = []
        history, history_len = utils.multi_sequences_padding(
            history, self.max_sentence_len)
        history, history_len = np.array(history), np.array(history_len)
        true_utt_len = np.array(
            utils.get_sequences_length(true_utt, maxlen=self.max_sentence_len))
        true_utt = np.array(
            pad_sequences(true_utt,
                          padding='post',
                          maxlen=self.max_sentence_len))
        utt_char = multi_char_sequences_padding(utt_char, 50)
        true_ch_utt = np.array(
            pad_sequences(true_ch_utt,
                          padding='post',
                          maxlen=self.max_sentence_len))

        low = 0
        dro = 0.1
        while True:
            feed_dict = {
                self.utterance_ph:
                np.concatenate([history[low:low + 200]], axis=0),
                self.all_utterance_len_ph:
                np.concatenate([history_len[low:low + 200]], axis=0),
                self.response_ph:
                np.concatenate([true_utt[low:low + 200]], axis=0),
                self.response_len:
                np.concatenate([true_utt_len[low:low + 200]], axis=0),
                self.response_cph:
                np.concatenate([true_ch_utt[low:low + 200]],
                               axis=0),  # todo negs
                self.utterance_cph:
                np.concatenate([utt_char[low:low + 200]], axis=0),
                self.dropout:
                dro,
                self.N:
                200,
                self.sample_numbers:
                1
            }
            candidate_scores = sess.run(self.y_pred, feed_dict=feed_dict)
            self.all_candidate_scores.append(candidate_scores[:, 1])
            low = low + 200
            if low >= history.shape[0]:
                break
        all_candidate_scores = np.concatenate(self.all_candidate_scores,
                                              axis=0)
        computeR10_1 = Evaluate.ComputeR10_1(all_candidate_scores, labels)
        computeR2_1 = Evaluate.ComputeR2_1(all_candidate_scores, labels)
        return computeR10_1, computeR2_1
Example #7
def dsd_100_experiment(model_config):
    print("SCRIPT START")
    # Create subfolders if they do not exist to save results
    for dir in [model_config["model_base_dir"], model_config["log_dir"]]:
        if not os.path.exists(dir):
            os.makedirs(dir)

    # Set up data input
    if os.path.exists('dataset.pkl'):
        with open('dataset.pkl', 'rb') as file:
            dataset = pickle.load(file)
        print("Loaded dataset from pickle!")
    else:
        dsd_train, dsd_test = Datasets.getMUSDB(model_config["musdb_path"])
        ccm = Datasets.getCCMixter("CCMixter.xml")

        # Pick 5 random songs for validation from MUSDB train set (this is always the same selection each time since we fix the random seed!)

        val_idx = np.random.choice(len(dsd_train), size=5, replace=False)
        train_idx = [i for i in range(len(dsd_train)) if i not in val_idx]
        print("Validation with MUSDB training songs no. " + str(val_idx))

        # Draw randomly from datasets
        dataset = dict()
        dataset["train_sup"] = [dsd_train[i] for i in train_idx] + ccm
        dataset["train_unsup"] = list(
        )  #[dsd_train[0][25:], dsd_train[1][25:], dsd_train[2][25:]] #[fma, list(), looperman]
        dataset["valid"] = [dsd_train[i] for i in val_idx]
        dataset["test"] = dsd_test

        with open('dataset.pkl', 'wb') as file:
            pickle.dump(dataset, file)
        print("Created dataset structure")

    # Setup dataset depending on task. Dataset contains sources in order: (mix, acc, bass, drums, other, vocal)
    if model_config["task"] == "voice":
        for i in range(25):
            dataset["train_sup"][i] = (dataset["train_sup"][i][0],
                                       dataset["train_sup"][i][1],
                                       dataset["train_sup"][i][5])
        for subset in ["valid", "test"]:
            for i in range(len(dataset[subset])):
                dataset[subset][i] = (dataset[subset][i][0],
                                      dataset[subset][i][1],
                                      dataset[subset][i][5])

    # Optimize in a +supervised fashion until validation loss worsens
    sup_model_path, sup_loss = optimise(dataset=dataset)
    print("Supervised training finished! Saved model at " + sup_model_path +
          ". Performance: " + str(sup_loss))
    Evaluate.produce_source_estimates(model_config, sup_model_path,
                                      model_config["musdb_path"],
                                      model_config["estimates_path"], "train")
Example #8
File: tcrm.py Project: squireg/tcrm
def doEvaluation(configFile):
    """
    Do the track model evaluation processing.
    
    :param str configFile: Name of the configuration file.
    
    """
    
    log.info("Running Evaluation")
    
    import Evaluate
    Evaluate.run(configFile)
Example #9
def main(cfg, model_path, input_path, output_path, source):
    model_config = cfg["model_config"]
    if source is None:
        if model_config['multiple_source_training']:
            raise ValueError('Please specify which source to extract: python predict_chorale.py with source=<source>')
        score_filenames = {
            source_name: get_score_filename(source_name)
            for source_name in model_config["source_names"]}
    else:
        score_filenames = {'source': get_score_filename(source)}
    Evaluate.produce_source_estimates(model_config, model_path, input_path, output_path, score_filenames)
    print('Outputs saved to: %s' % output_path)
Example #10
 def Evaluate(self, test_path, model_path):
     saver = tf.train.Saver()
     config = tf.ConfigProto()
      config.gpu_options.per_process_gpu_memory_fraction = 0.4  # allocate only 40% of GPU memory
     with open(test_path, 'rb') as f:
         val_history, val_response, val_labels = pickle.load(f)
     first = True
     with tf.Session(config=config) as sess:
         saver.restore(sess, model_path)
         all_candidate_scores = []
         all_pred_labels = []
         low = 0
         batch_size_for_val = 2000
         while True:
             batch_history = self.copy_list(val_history[low:low +
                                                        batch_size_for_val])
             batch_history, batch_history_len = utils.multi_sequences_padding(
                 batch_history, self.max_sentence_len)
             batch_history, batch_history_len = np.array(
                 batch_history), np.array(batch_history_len)
             batch_response = self.copy_list(
                 val_response[low:low + batch_size_for_val])
             batch_response_len = np.array(
                 utils.get_sequences_length(batch_response,
                                            maxlen=self.max_sentence_len))
             batch_response = np.array(
                 pad_sequences(batch_response,
                               padding='post',
                               maxlen=self.max_sentence_len))
             feed_dict = {
                 self.utterance_ph: batch_history,
                 self.all_utterance_len_ph: batch_history_len,
                 self.response_ph: batch_response,
                 self.response_len: batch_response_len,
             }
             candidate_scores, pred_labels = sess.run(
                 [self.y_pred, self.class_label_pred], feed_dict=feed_dict)
             if first:
                 print(pred_labels)
                 first = False
             all_candidate_scores.append(candidate_scores[:, 1])
             all_pred_labels.append(pred_labels)
             low = low + batch_size_for_val
             if low >= len(val_labels):
                 break
         all_candidate_scores = np.concatenate(all_candidate_scores, axis=0)
         all_pred_labels = np.concatenate(all_pred_labels, axis=0)
     return Evaluate.precision_of_classification(
         all_pred_labels,
         val_labels), Evaluate.mrr_and_rnk(all_candidate_scores,
                                           val_labels,
                                           response_num_per_query=11)
Example #11
def main(cfg, model_path, input_path, output_path):
    model_config = cfg["model_config"]
    Evaluate.produce_source_estimates(model_config, model_path, input_path,
                                      output_path)


# Others
# CH01_Bach_audio.wav
# CH04_Mendelssohn_audio.wav
# CH05_Byrd_audio.wav

# unet_l1_283877-228000-noESMUC
# waveunet_755824-246000-noESMUC
Example #12
    def model_evaluate(self):
        # evaluate the model

        ## preprocess: find test_users and candidate_items
        test_users = np.unique(self.test_tuple[:,0])                     # all users in the test set
        allItems_train = np.unique(self.train_tuple[:,1])                # all items in the train set
        allItems_test = np.unique(self.test_tuple[:,1])                  # all items in the test set
        candidate_items = np.union1d(allItems_train,allItems_test)  # all items in the train and test set

        recEval = Evaluate(self.userFactors,self.itemFactors,self.train_matrix,self.test_matrix,test_users,candidate_items)
        ret = recEval.CalcMetrics()
        print 'AUC =',ret[0],'Prec@5 =',ret[1],'Prec@10 =',ret[2], 'MAP =', ret[3], 'Rec@5 =', ret[4], 'Rec@10 =', ret[5], 'NDCG =', ret[6], 'MRR =', ret[7]
        return ret
Example #13
def training(model, dataset, epochs, num_negative, filename):
    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())

        batch_begin = time()
        _Data = Data(dataset, num_negative)
        batches = _Data._batches
        batch_time = time() - batch_begin

        num_batch = len(batches[2])
        batch_index = list(range(num_batch))

        #initialize the evaluation feed dicts
        testDict = evaluate._init_test_data(model, sess, dataset.testRatings,
                                            dataset.testNegatives,
                                            dataset.trainList)

        best_hr, best_ndcg = 0.0, 0.0
        for epoch_count in range(epochs):
            train_begin = time()
            training_batch(batch_index, model, sess, _Data)
            train_time = time() - train_begin

            loss_begin = time()
            train_loss = training_loss(batch_index, model, sess, _Data)
            loss_time = time() - loss_begin

            eval_begin = time()
            (hits, ndcgs,
             losses) = evaluate.evaluate(model, sess, dataset.testRatings,
                                         dataset.testNegatives, testDict)
            hr, ndcg, test_loss = np.array(hits).mean(), np.array(
                ndcgs).mean(), np.array(losses).mean()
            eval_time = time() - eval_begin

            if hr > best_hr:
                best_hr = hr
                best_ndcg = ndcg

            print(
                "Epoch {} [{:.1f}s + {:.1f}s]: HR = {:.4f}, NDCG = {:.4f}, loss ={:.4f} [{:.4f}s] train_loss ={:.4f} [{:.1f}s]"
                .format(epoch_count, batch_time, train_time, hr, ndcg,
                        test_loss, eval_time, train_loss, loss_time))
            with open(filename, 'a+') as f:
                f.write(
                    "Epoch {} [{:.1f}s + {:.1f}s]: HR = {:.4f}, NDCG = {:.4f}, loss ={:.4f} [{:.4f}s] train_loss ={:.4f} [{:.1f}s]\n"
                    .format(epoch_count, batch_time, train_time, hr, ndcg,
                            test_loss, eval_time, train_loss, loss_time))
            np.random.shuffle(batch_index)
        return best_hr, best_ndcg
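
The evaluate module imported here is not part of this listing. Code of this shape (NCF-style recommenders) usually follows a leave-one-out protocol: for each test user, one held-out positive item is ranked against a list of sampled negatives, and HR@k and NDCG@k are averaged over users. A per-user sketch under that assumption, not the project's actual implementation:

import math

def hit_ratio_and_ndcg(item_scores, positive_item, k=10):
    """HR@k and NDCG@k for one test case with a single relevant item.

    item_scores maps item id -> predicted score (the positive plus sampled negatives).
    """
    ranked = sorted(item_scores, key=item_scores.get, reverse=True)[:k]
    if positive_item not in ranked:
        return 0.0, 0.0
    rank = ranked.index(positive_item)           # 0-based rank inside the top k
    return 1.0, math.log(2) / math.log(rank + 2)
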
Example #14
def run(cfg):
    model_config = cfg["model_config"]
    print("SCRIPT START")
    # Create subfolders if they do not exist to save results
    for dir in [model_config["model_base_dir"], model_config["log_dir"]]:
        if not os.path.exists(dir):
            os.makedirs(dir)

    # Optimize in a supervised fashion until validation loss worsens
    sup_model_path, sup_loss = optimise()
    print("Supervised training finished! Saved model at " + sup_model_path + ". Performance: " + str(sup_loss))

    # Evaluate trained model on MUSDB
    Evaluate.produce_musdb_source_estimates(model_config, sup_model_path, model_config["musdb_path"], model_config["estimates_path"])
Example #15
def Eval_with_generator(gen):
    res_table = []
    c = 0
    for ref_sen, gen_sen in gen:
        try:
            c += 1
            res = Evaluate.ROUGE_eval(ref_sen, gen_sen)
            res_table.append(res)
            print('[INFO] CASE %d finish evaluation' % c)
            print(res)
        except ValueError:
            print('[ERROR] CASE %d failed in  evaluation' % c)
            pass

    Evaluate.do_eval(res_table)
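
Evaluate.ROUGE_eval and Evaluate.do_eval are not shown here. ROUGE_eval presumably computes ROUGE scores for one reference/hypothesis pair (note the ValueError handling above, which matches scorers that reject empty hypotheses), and do_eval aggregates the per-case results. A sketch using the third-party rouge package, which is an assumption rather than the project's actual scorer:

# Sketch only; assumes the pypi "rouge" package.
from rouge import Rouge

_scorer = Rouge()

def rouge_eval(ref_sen, gen_sen):
    # get_scores returns a list like [{'rouge-1': {'f': ..., 'p': ..., 'r': ...}, ...}]
    return _scorer.get_scores(gen_sen, ref_sen)[0]
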
Example #16
 def evaluate_val_for_train(self, sess, data):
     val_history, val_response, val_labels = data
     all_candidate_scores = []
     low = 0
     batch_size_for_val=4000
     while True:
         batch_history = self.copy_list(val_history[low:low + batch_size_for_val])
         batch_history, batch_history_len = utils.multi_sequences_padding(batch_history, self.max_sentence_len)
         batch_history, batch_history_len = np.array(batch_history), np.array(batch_history_len)
         batch_response = self.copy_list(val_response[low:low + batch_size_for_val])
         batch_response_len = np.array(utils.get_sequences_length(batch_response, maxlen=self.max_sentence_len))
         batch_response = np.array(pad_sequences(batch_response, padding='post', maxlen=self.max_sentence_len))
         feed_dict = {self.utterance_ph: batch_history,
                      self.all_utterance_len_ph: batch_history_len,
                      self.response_ph: batch_response,
                      self.response_len: batch_response_len,
                      self.y_true: np.concatenate([val_labels[low:low + batch_size_for_val]], axis=0),
                      }
         candidate_scores,loss = sess.run([self.y_pred,self.total_loss], feed_dict=feed_dict)
         all_candidate_scores.append(candidate_scores[:, 1])
         low = low + batch_size_for_val
         if low >= len(val_labels):
             break
     all_candidate_scores = np.concatenate(all_candidate_scores, axis=0)
     return Evaluate.precision_of_matching_1(all_candidate_scores, val_labels,response_num_per_query=11),loss
Example #17
    def getSimilarity4Statements(self, folderpath):
        """Get similarity between candiadate statements and target statement."""
        getSimilarity = Evaluate.GetSimilarity('tfidf', self.rootpath)
        tokens_candidates, id2claims = getSimilarity.getCorpusOfCandidateClaims(
            folderpath)
        tokens_target = getSimilarity.getCorpusOfTargetClaim(folderpath)
        # print("tokens_statements", len(tokens_statements))
        # print("tokens_target", tokens_target)
        vectors_candidates = getSimilarity.getVector(tokens_candidates)
        vector_target = getSimilarity.getVector(tokens_target)
        # print("vectors_candidates", vectors_candidates[0:1].shape)
        # print("vector_target", vector_target.shape)
        similarities = getSimilarity.getCosineSimilarity(
            vectors_candidates, vector_target)
        # print(similarities, vectors_candidates)
        # print(similarities[0], len(similarities[0]))

        # average, max, min
        maximum = max(similarities[0])
        print("max: {}".format(maximum))

        id2similarities = dict(enumerate(list(similarities[0])))
        data = []
        for key in id2claims.keys():
            data.append([id2claims[key], id2similarities[key]])
        self.helper.dumpCsv(folderpath + "/final", "similarities.csv",
                            ['statement', 'similarity'], data)
Example #18
def run(cfg):
    sup_model_path = "./checkpoints/unet++_10_deep_supervised-258687/258687-128000"
    print("[Evaluation]  Start evaluation ... ")
    model_config = cfg["model_config"]
    model_config["evaluate_subnet"] = True
    if model_config["network"] != "unet++":
        raise NotImplementedError

    print("[unet++] Start to Evaluate Sliced Nested Unet!")
    base_estimates_path = model_config["estimates_path"]
    for i in range(0, model_config["min_sub_num_layers"]):
        print("[unet++] Evaluating {}-layer subnet".format(i))
        # join against the original base path so subnet folders do not nest
        model_config["estimates_path"] = os.path.join(base_estimates_path, str(i))
        model_config["sub_num_layers"] = model_config["num_layers"] - i
        Evaluate.produce_musdb_source_estimates(model_config, sup_model_path,
                                                model_config["musdb_path"],
                                                model_config["estimates_path"])
Example #19
    def Test(self, data, goldstandard):
        self.testData = data
        self.goldstandard = goldstandard

        # Test with labelled Data
        prediction = self.Predict(data)

        score = Evaluate.accuracy(prediction, goldstandard)
Example #20
    def getSimilarityStatements2Tweets(self, folderpath):
        """Get similarity between candidate claims and tweets.

        Arguments:
            folderpath {str} -- the path to data folder

        Returns:
            None -- index_tweet_2_index_candidate_claim.json;
                    index_candidate_claim_2_index_tweet.json;
                    index_candidate_claim_2_tweet.json are generated.
        """
        getSimilarity = Evaluate.GetSimilarity('tfidf', self.rootpath)
        tokens_claims, id2claims = getSimilarity.getCorpusOfCandidateClaims(
            folderpath)
        print("length of statements ", len(tokens_claims))

        tokens_tweets, id2tweets = getSimilarity.getCorpusOfTweets(folderpath)
        print("length of tweets ", len(tokens_tweets))

        # return None if any of them is None
        if len(tokens_claims) == 0 or len(tokens_tweets) == 0:
            print("no statements or tweets.")
            return
        vectors_claims = getSimilarity.getVector(tokens_claims)
        print("shape of vectors_claims ", vectors_claims.shape)
        vector_tweets = getSimilarity.getVector(tokens_tweets)
        print("shape of vector_tweets ", vector_tweets.shape)

        # shape is #vector_tweets x #vectors_candidates
        similarities = getSimilarity.getCosineSimilarity(
            vectors_claims, vector_tweets)
        print("shape of similarities ", similarities.shape)

        # get the index of the most similar candidate claim for each tweet
        index_tweet_2_max_index_candidate_claim = dict(enumerate(
            list(np.argmax(similarities, axis=1))))
        self.helper.dumpJson(folderpath + "/final",
                             "index_tweet_2_index_candidate_claim.json",
                             index_tweet_2_max_index_candidate_claim)
        # reverse the key and value
        max_index_candidate_claim_2_index_tweet = defaultdict(list)
        for tid, sid in index_tweet_2_max_index_candidate_claim.items():
            max_index_candidate_claim_2_index_tweet[sid].append(tid)
        self.helper.dumpJson(folderpath + "/final",
                             "index_candidate_claim_2_index_tweet.json",
                             max_index_candidate_claim_2_index_tweet)
        # generate {index_claim: [tweet1, tweet2, ...]}
        index_candidate_claim_2_tweet = defaultdict(list)
        for index_candidate_claim in max_index_candidate_claim_2_index_tweet.keys(
        ):
            for index_tweet in max_index_candidate_claim_2_index_tweet[
                    index_candidate_claim]:
                index_candidate_claim_2_tweet[index_candidate_claim].append(
                    id2tweets[index_tweet])
        self.helper.dumpJson(os.path.join(folderpath, "final"),
                             "index_candidate_claim_2_tweet.json",
                             index_candidate_claim_2_tweet)
        print("index_candidate_claim_2_tweet.json has been saved.")
Example #21
def run(cfg):
    model_config = cfg["model_config"]
    experiment_id = cfg["experiment_id"]
    print("SCRIPT START")
    # Create subfolders if they do not exist to save results
    for dir in [model_config["model_base_dir"], model_config["log_dir"]]:
        if not os.path.exists(dir):
            os.makedirs(dir)

    # Optimize in a supervised fashion until validation loss worsens
    sup_model_path, sup_loss = optimise()
    print("Supervised training finished! Saved model at " + sup_model_path +
          ". Performance: " + str(sup_loss))

    # Evaluate trained model
    Evaluate.evaluate_dataset(model_config, "VCTK", str(experiment_id),
                              sup_model_path)  # VCTK
    Evaluate.evaluate_dataset(model_config, "DAPS", str(experiment_id),
                              sup_model_path)  # DAPS
Example #22
def run(cfg):
    model_config = cfg["model_config"]
    print("SCRIPT START")
    # Create subfolders if they do not exist to save results
    for dir in [model_config["model_base_dir"], model_config["log_dir"]]:
        if not os.path.exists(dir):
            os.makedirs(dir)

    #model_path = "./checkpoints/unet++_10_deep_supervised/243137-88000"
    #model_path = None
    sup_model_path = "./checkpoints/unet++_12_normal-599740/599740-110000"
    # Optimize in a supervised fashion until validation loss worsens
    #sup_model_path, sup_loss = optimise(model_path=model_path)
    #print("Supervised training finished! Saved model at " + sup_model_path + ". Performance: " + str(sup_loss))

    # Evaluate trained model on MUSDB
    # TODO
    Evaluate.produce_musdb_source_estimates(model_config, sup_model_path,
                                            model_config["musdb_path"],
                                            model_config["estimates_path"])
Example #23
def waveunet(cfg):
    sys.path.append('./Wave-U-Net')
    import Evaluate

    model_config = cfg["model_config"]
    model_path = cfg["model_path"]
    input_path = cfg['input_path']
    output_path = './'
    outputs = Evaluate.produce_source_estimates(model_config, model_path,
                                                input_path, output_path)

    return outputs
Example #24
def valid():
    all_scores = []
    for mini_batch in validloader:
        query, response = mini_batch[0], mini_batch[1]
        query = query.long().to(device)
        response = response.long().to(device)
        logit = model(query, response).squeeze()
        scores = F.softmax(logit, 0).cpu().detach().numpy()
        all_scores.append(np.argmax(scores[:, 1]))
    #all_scores = np.concatenate(all_scores,axis=0)
    print(np.size(all_scores), np.size(labels))
    return Evaluate.ComputeR10_1(all_scores, labels)
Example #25
def main():
    generator = Models.Generator()
    discriminator = Models.Discriminator()

    if HP.load_model:
        generator.load()
        discriminator.load()

    train_dataset, test_dataset = Data.load_dataset()
    test_dataset = Data.separate_dataset(test_dataset)

    if HP.biased_train_data:
        train_dataset = Data.load_train_biased_data(HP.biased_data_sizes)

    fids = []
    for epoch in range(HP.epochs):
        print('iter', epoch)
        start = time.time()
        Train.train(generator.model, discriminator.model, train_dataset, epoch)
        print('saving...')
        generator.save()
        discriminator.save()
        generator.save_images(epoch)
        print('saved')
        if HP.evaluate_model and (epoch + 1) % HP.epoch_per_evaluate == 0:
            fid = Evaluate.get_multi_fid(generator.model, test_dataset)
            print('fid :', fid)
            fids.append(fid)
        print('time: ', time.time() - start)

    if not HP.evaluate_model:
        fid = Evaluate.get_multi_fid(generator.model, test_dataset)
        print('fid :', fid)
        fids.append(fid)

    if not os.path.exists(HP.folder_name):
        os.makedirs(HP.folder_name)

    Data.save_graph(fids)
Example #26
 def Evaluate(self,sess):
     with open(evaluate_file, 'rb') as f:
        history, true_utt,labels = pickle.load(f)
     self.all_candidate_scores = []
     history, history_len = utils.multi_sequences_padding(history, self.max_sentence_len)
     history, history_len = np.array(history), np.array(history_len)
     true_utt_len = np.array(utils.get_sequences_length(true_utt, maxlen=self.max_sentence_len))
     true_utt = np.array(pad_sequences(true_utt, padding='post', maxlen=self.max_sentence_len))
     low = 0
     while True:
         feed_dict = {self.utterance_ph: np.concatenate([history[low:low + 200]], axis=0),
                      self.all_utterance_len_ph: np.concatenate([history_len[low:low + 200]], axis=0),
                      self.response_ph: np.concatenate([true_utt[low:low + 200]], axis=0),
                      self.response_len: np.concatenate([true_utt_len[low:low + 200]], axis=0),
                      }
         candidate_scores = sess.run(self.y_pred, feed_dict=feed_dict)
         self.all_candidate_scores.append(candidate_scores[:, 1])
         low = low + 200
         if low >= history.shape[0]:
             break
     all_candidate_scores = np.concatenate(self.all_candidate_scores, axis=0)
     Evaluate.ComputeR10_1(all_candidate_scores,labels)
     Evaluate.ComputeR2_1(all_candidate_scores,labels)
Example #27
def predict_chorale(model_config, chorale, model_path, output_path):
    mix_path = chorale['mix']
    chorale_number = os.path.basename(mix_path).split('_')[1]
    output_path = os.path.join(output_path, chorale_number)

    if model_config['multiple_source_training']:
        for source in model_config['source_names']:
            print('\t' + source)
            source_output_path = os.path.join(output_path, source)
            score_filenames = {'source': chorale[source + '_score']}
            Evaluate.produce_source_estimates(model_config, model_path,
                                              mix_path, source_output_path,
                                              score_filenames)

    else:
        if model_config['score_informed']:
            score_filenames = {
                source_name: chorale[source_name + '_score']
                for source_name in model_config["source_names"]
            }
        else:
            score_filenames = {}
        Evaluate.produce_source_estimates(model_config, model_path, mix_path,
                                          output_path, score_filenames)
Example #28
def main(cfg, model_path, output_path):
    

    model_config = cfg["model_config"]
    
    dataset = Datasets.get_dataset_pickle(model_config)
    
    L1 = []
    L2 = []
    for track in dataset['test']:
        
        output_track = os.path.basename(track['mix'])  
        output_track = os.path.join(output_path,output_track)
        
        print(output_track)
        
        Evaluate.produce_outputs(model_config, model_path, track, output_track)
        
        target, sr = Utils.load(track['mix'], sr=None, mono=False) 
        target = target/np.max(np.abs(target))
        
        soundfile.write(output_track+'_target.wav', target, sr)
        
        output, _ = Utils.load(output_track, sr=None, mono=False) 
        
        l1 = np.mean(np.abs(target-output))
        l2 = np.mean(np.square(target-output))
        
        L1.append(l1)
        L2.append(l2)
    
    print('L1: %.8f' % np.mean(np.asarray(L1)))
    print('L2: %.8f' % np.mean(np.asarray(L2)))
Example #29
    def getSimilarityStatements2Tweets(self, folderpath):
        """Get similarity between candidate statements and tweets.
        
        Arguments:
            folderpath {str} -- the path to data folder
        
        Returns:
            None -- index_candidate_statement_2_index_tweet.json and index_tweet_2_index_candidate_statement.json are generated.
        """
        getSimilarity = Evaluate.GetSimilarity('tfidf', self.rootpath)
        tokens_statements, id2candiadateStatements = getSimilarity.getCorpusFromCandidateStatements4Cluster(
            folderpath)
        print("length of statements ", len(tokens_statements))

        tokens_tweets, id2tweets = getSimilarity.getCorpusFromTweets4Cluster(
            folderpath)
        print("length of tweets ", len(tokens_tweets))

        # return None if any of them is None
        if len(tokens_statements) == 0 or len(tokens_tweets) == 0:
            print("no statements or tweets.")
            return
        vectors_candidates = getSimilarity.getVector(tokens_statements)
        print("shape of vectors_candidates ", vectors_candidates.shape)
        vector_tweets = getSimilarity.getVector(tokens_tweets)
        print("shape of vector_tweets ", vector_tweets.shape)

        # shape is #vector_tweets x #vectors_candidates
        similarities = getSimilarity.getCosineSimilarity(
            vectors_candidates, vector_tweets)
        print("shape of similarities ", similarities.shape)

        # get the index of the most similar candidate statement for each tweet
        index_tweet_2_max_index_candidate_statement = dict(enumerate(
            list(np.argmax(similarities, axis=1))))
        self.helper.dumpJson(
            folderpath + "/final",
            "index_tweet_2_index_candidate_statement.json",
            index_tweet_2_max_index_candidate_statement)
        # reverse the key and value
        max_index_candidate_statement_2_index_tweet = defaultdict(list)
        for tid, sid in index_tweet_2_max_index_candidate_statement.items():
            max_index_candidate_statement_2_index_tweet[sid].append(tid)
        self.helper.dumpJson(folderpath + "/final",
                             "index_candidate_statement_2_index_tweet.json",
                             max_index_candidate_statement_2_index_tweet)
Example #30
def CrossValDE(modelName,
               X,
               y,
               metric,
               processor=None,
               cv=3,
               times=1,
               random_state=0):
    """
    Cross-validation used when optimising DE.
    """
    res = []
    yt = []
    for i in y:
        if (i == 0):
            yt.append(0)
        else:
            yt.append(1)
    for t in range(times):
        skf = StratifiedKFold(n_splits=cv,
                              shuffle=True,
                              random_state=random_state + t)
        indices = list(skf.split(X=X, y=yt))
        #        print (" ",time.strftime("%M:%S"))
        for k in indices:
            x_train, y_train, x_test, y_test = X[k[0]], y[k[0]], X[k[1]], y[
                k[1]]

            if processor is not None:
                #                t=procsessor.transform(x_train,y_train)
                #                print("t",len(t))
                #                if(len(t)==3):
                #                    qqq=1
                #                    qqq+=1
                x_train, y_train = processor.transform(x_train, y_train)
            estimator = buildModel(x_train, y_train, modelName)
            res.append(E.Eva(estimator, x_test, y_test, metric))


#        print (" ",time.strftime("%M:%S"))
    res = np.array(res)
    return res.mean()
Example #31
    def evaluate_formulas(self, formulas, node, attributes, PARAS, STATES):
        Evaluate.size = 1
        for a in attributes:
            formula = formulas[a]

            try:          
                result = Evaluate.evaluate(formula, PARAS, STATES)            
            
            except:
                where = self.get_where()
                what = 'Error evaluating expression: ' + a + ' = ' + node.find(a).text + ' -> ' + formula
                raise BSException(where, what)
                  
            attr = next((x for x in self.parameters if x==a), '_NOT_FOUND_')
            if attr==a:
                PARAS[a] = result
            else:
                STATES[a] = result 
                  
        return (PARAS, STATES)
Example #32
 def fitness(self, targetPars, otherPars):
     """
     For SMOTUNED, otherPars holds a single value: the dataset.
     """
     res = []
     data = deepcopy(otherPars[0])
     stf = self.stratify(K=3, dataSet=data, randomSeed=1)
     for i in range(3):
         trainingSet = deepcopy(data.iloc[stf[i][0], :])
         testSet = deepcopy(data.iloc[stf[i][1], :])
         S = SMOTER(targetPars[0, 0], targetPars[0, 1], targetPars[0, 2],
                    trainingSet)
         trainingSet_SMOTE = S.smoteR()
         model = BM.buildModel(trainingSet=trainingSet_SMOTE,
                               modelType=self.modelName)
         res.append(
             E.Eva(model,
                   testSet=testSet,
                   metric=self.metric,
                   predValue="bug"))
     return sum(res) / len(res)
Example #33
    def evaluate_formulas_batch(self, formulas, node, attributes, PARAS, STATES, num):
        Evaluate.size = num
        for a in attributes:
            formula = formulas[a]

            try:          
                result = Evaluate.evaluate(formula, PARAS, STATES)            
            
            except:
                where = self.get_where()
                what = 'Error evaluating expression: ' + a + ' = ' + node.find(a).text + ' -> ' + formula
                raise BSException(where, what)
                  
            attr = next((x for x in self.parameters if x==a), '_NOT_FOUND_')
             
            if (not isinstance(result, list)) and ( not isinstance(result, np.ndarray)):
                result = [result]*num    
           
            if attr==a:
                PARAS[a] = result
            else:
                STATES[a] = result 

        return (PARAS, STATES)
Example #34
def main():
    generator = Models.Generator()
    discriminator = Models.Discriminator()

    if HP.load_model:
        generator.load()
        discriminator.load()

    train_dataset, test_dataset = Dataset.load_dataset()

    fids = []
    for epoch in range(HP.epochs):
        print('iter', epoch)
        start = time.time()
        discriminator_loss, generator_loss = Train.train(generator.model, discriminator.model, train_dataset, epoch)
        print('discriminator loss :', discriminator_loss)
        print('generator loss :', generator_loss)
        print('saving...')
        generator.save()
        discriminator.save()
        generator.save_images(epoch)
        print('saved')

        if HP.evaluate_model and (epoch + 1) % HP.epoch_per_evaluation == 0:
            fid = Evaluate.get_average_fid(generator.model, test_dataset)
            fids.append(fid)
            print('fid: ', fid)
            Evaluate.save_graph(fids)

        print('time: ', time.time() - start)

    if not HP.evaluate_model:
        fid = Evaluate.get_average_fid(generator.model, test_dataset)
        fids.append(fid)
        print('fid: ', fid)
        Evaluate.save_graph(fids)
        return 1

Example #35
    no_output = programParams.no_out
    # Open the videos
    truth_cap = None
    if programParams.doEval:
        truth_cap = cv2.VideoCapture(programParams.truth_path)
    input_cap = cv2.VideoCapture(programParams.input_path)

    csv_file = None
    csv_writer = None

    # Create CSV writer
    if not programParams.no_csv and programParams.doEval:
        csv_path, extension = os.path.splitext(programParams.input_path)
        csv_writer = e.createCSVWriter( csv_path )

        # Write headers
        csv_writer.writerow( e.FrameStats.GetCSVHeader() )

    # Get video parameters (try to retain same attributes for output video)
    width = int(input_cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    height = int(input_cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    fps = float(input_cap.get(cv2.CAP_PROP_FPS))
    codec = int(input_cap.get(cv2.CAP_PROP_FOURCC))

    # Create a video writer so that the results can be saved.
    sourceName, file_extension = os.path.splitext(programParams.input_path)
    output_path = sourceName + '_outvibe' + file_extension
    writer = cv2.VideoWriter(output_path, codec, fps, (width, height))
    if not writer:
Example #36
def main():
    # Initialize pygame
    # os.environ['SDL_VIDEODRIVER'] = 'windib'
    pygame.init()
    pygame.mouse.set_cursor(*pygame.cursors.tri_left)
    os.environ["SDL_VIDEO_CENTERED"] = "1"  #

    Game.framesPerSecond = 48
    Game.speedModifier = 1
    Game.autoMode = 0
    Game.balanceMode = 0
    drawTick = 0

    Game.state = STATE_INITMENU
    Game.mainMenuFont = pygame.font.Font(None, 32)
    Game.gameMenuFont = pygame.font.Font(None, 24)
    Game.enemyCountFont = pygame.font.Font(None, 24)
    Game.popUpFont = pygame.font.Font(None, 24)

    # Set the height and width of the screen
    size = [mapWidth * tileSize + rightMenuSize, mapHeight * tileSize + bottomMenuSize]
    screen = pygame.display.set_mode(size)
    screen.fill(background)
    layer = pygame.Surface(size)
    layer.set_colorkey(spritepink)
    layer.set_alpha(120)

    # Variables to tell Game what to draw
    Game.drawMouseOver = 0
    Game.drawMessage = 0
    Game.repaintMap = 0
    Game.placedTower = 0

    Game.EndLevelDraw = 0
    Game.restartWave = 0
    Game.nextWave = 0

    # Initialize the Images
    Images.init()

    # Initialize the MainMenu
    mainMenu = cMenu(
        128,
        320,
        20,
        5,
        "vertical",
        100,
        screen,
        [
            ("Campaign", 1, None),
            ("Challenge 1", 2, None),
            ("Challenge 2", 3, None),
            ("Challenge 3", 4, None),
            ("Challenge 4", 5, None),
            ("Challenge 5", 6, None),
            ("Random Level", 7, None),
            ("Exit", 8, None),
        ],
        Images.Background,
    )
    gameMenu = cMenu(
        128,
        320,
        20,
        5,
        "vertical",
        100,
        screen,
        [("Resume", 1, None), ("Back to main menu (current progress will be lost!)", 2, None)],
        Images.Background,
    )

    menubackground = Images.Background
    interfaceBGwashed = Images.InterfaceBGwashed
    InterfaceBGopaque = Images.InterfaceBGopaque
    rect_list = []

    # Initialize the map
    map = Map.Map(mapWidth, mapHeight)

    # Initialize the wave
    wave = Wave.Wave(map)

    # Initialize the Towers class
    towers = Towers.Towers(map, wave)
    wave.setTowers(towers)

    # Initialize the shot graphics
    shots = Shots()

    # Initialize the tower bar
    towerBar = TowerBar.TowerBar(0, mapHeight * tileSize, map, towers)

    # Initialize the menu
    menu = Menu.Menu(map, wave, towers)

    # Initialize the level
    Game.level = Level.Level(map, wave, towers, towerBar, menu)

    # Set title of screen
    pygame.display.set_caption("AI Tower Defense -- (c) POB & ND")

    # Loop until the user clicks the close button.
    close_game = False

    # Used to manage how fast the screen updates
    clock = pygame.time.Clock()

    # -------- Main Program Loop -----------
    while close_game == False:
        ## Init menu
        if Game.state == STATE_INITMENU:
            screen.fill(background)
            screen.blit(menubackground, (0, 0))
            pygame.display.flip()
            mainMenustate = 0
            mainMenuprev_state = 1
            Game.state = STATE_MENU

        ## Menu
        elif Game.state == STATE_MENU:
            if mainMenuprev_state != mainMenustate:
                pygame.event.post(pygame.event.Event(EVENT_CHANGE_STATE, key=0))
                mainMenuprev_state = mainMenustate
            e = pygame.event.wait()
            if e.type == pygame.KEYDOWN or e.type == EVENT_CHANGE_STATE:
                if mainMenustate == 0:
                    rect_list, mainMenustate = mainMenu.update(e, mainMenustate)
                # Campaign
                elif mainMenustate == 1:
                    Game.state = STATE_LOADGAME
                    Game.level.loadLevel("Campaign1")
                # Challenges
                elif mainMenustate == 2:
                    Game.state = STATE_LOADGAME
                    Game.level.loadLevel("Challenge1")
                elif mainMenustate == 3:
                    Game.state = STATE_LOADGAME
                    Game.level.loadLevel("Challenge2")
                elif mainMenustate == 4:
                    Game.state = STATE_LOADGAME
                    Game.level.loadLevel("Challenge3")
                elif mainMenustate == 5:
                    Game.state = STATE_LOADGAME
                    Game.level.loadLevel("Challenge4")
                elif mainMenustate == 6:
                    Game.state = STATE_LOADGAME
                    Game.level.loadLevel("Challenge5")
                # Random Level
                elif mainMenustate == 7:
                    Game.state = STATE_LOADGAME
                    Game.level.loadLevel("Campaign1")
                # Quit
                else:
                    pygame.quit()
                    sys.exit()
            if e.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            pygame.display.update(rect_list)

        ## Init Game Menu
        elif Game.state == STATE_INITGAMEMENU:
            screen.fill(background)
            screen.blit(menubackground, (0, 0))
            pygame.display.flip()
            gameMenustate = 0
            gameMenuprev_state = 1
            Game.state = STATE_GAMEMENU

        ## Game Menu
        elif Game.state == STATE_GAMEMENU:
            if gameMenuprev_state != gameMenustate:
                pygame.event.post(pygame.event.Event(EVENT_CHANGE_STATE, key=0))
                gameMenuprev_state = gameMenustate
            e = pygame.event.wait()
            if e.type == pygame.KEYDOWN or e.type == EVENT_CHANGE_STATE:
                if e.key == pygame.K_ESCAPE:
                    Game.state = STATE_LOADGAME
                else:
                    if gameMenustate == 0:
                        rect_list, gameMenustate = gameMenu.update(e, gameMenustate)
                    elif gameMenustate == 1:
                        Game.state = STATE_LOADGAME
                    elif gameMenustate == 2:
                        shots.clear()
                        towers.clear()
                        wave.clear()
                        Game.state = STATE_INITMENU
            if e.type == pygame.QUIT:
                pygame.quit()
                sys.exit()
            pygame.display.update(rect_list)

        ## POP UP
        if Game.state == STATE_INITPOPUP:
            Game.state = STATE_POPUP
            # Add a background or the game behind the text...
            screen.blit(InterfaceBGopaque, (0, 0))
            drawMap(map, screen)
            menu.draw(screen)
            Game.redrawSPBtn = 1
            menu.drawSPBtn(screen)
            towerBar.draw(screen)
            towerBar.moneyUpdated(screen)
            towerBar.showTower(screen)
            towerBar.updateTowerCost = 0
            pygame.display.flip()

        elif Game.state == STATE_POPUP:
            drawTick += 1
            if drawTick >= clock.get_fps() / 24:
                drawTick = 0
                Game.popUp.paint(screen)
                pygame.display.update(167, 90, 658, 465)
            for event in pygame.event.get():
                if event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        Game.dialog.close()
                    elif event.key == pygame.K_RETURN:
                        Game.dialog.close()
                    elif event.key == pygame.K_SPACE:
                        Game.dialog.close()
                elif event.type == pygame.QUIT:
                    sys.exit()
                else:
                    Game.popUp.event(event)

        ## Wave finished [Draw then restartWave or start next Wave] -> PopUp lorsque level finished?
        elif Game.state == STATE_ENDWAVE:
            if not Game.autoMode and not Game.balanceMode:
                drawTick += 1
                if drawTick >= clock.get_fps() / 24:
                    Game.EndLevelDraw += 1
                    drawTick = 0
                    drawGame(map, towerBar, towers, wave, shots, menu, screen, layer)
                    if Game.EndLevelDraw >= 5:
                        Game.state = STATE_PREPARATION
                        Game.EndLevelDraw = 0
                        if Game.restartWave:
                            Game.restartWave = 0
                            Game.level.restartWave()
                        elif Game.nextWave:
                            Game.nextWave = 0
                            Game.level.nextWave()
                        else:
                            print "Confus :("
        else:
            for event in pygame.event.get():  # User did something
                # If user clicked close
                if event.type == pygame.QUIT:
                    close_game = True  # Flag that we are done so we exit this loop

                ## Mouse Events
                # User moves over the mouse
                elif event.type == pygame.MOUSEMOTION:
                    updateUnderMouse(map, towerBar, towers)

                # User clicks the mouse. Get the position
                elif event.type == pygame.MOUSEBUTTONDOWN:
                    pos = pygame.mouse.get_pos()
                    # Change the x/y screen coordinates to grid coordinates (For the map)
                    column = pos[0] // tileSize
                    row = pos[1] // tileSize
                    # Inside Map
                    if (column < mapWidth) and (row < mapHeight):
                        if towerBar.selectedTower > -1:
                            if map.M[row][column] == car_turret:
                                if map.T[row][column] == 0:
                                    # TODO : money check
                                    towers.placeTower(towerBar.selectedTower, 0, row, column)
                                    Game.placedTower = 1
                                else:
                                    towers.updateTower(towerBar.selectedTower, row, column)
                                    Game.placedTower = 1
                        elif towerBar.selectedTower == TowerUPGRADE:
                            if map.M[row][column] == car_turret:
                                if map.T[row][column] != 0:
                                    towers.updateTower(towerBar.selectedTower, row, column)
                                    Game.placedTower = 1
                        elif towerBar.selectedTower == TowerERASE:
                            if map.M[row][column] == car_turret:
                                if map.T[row][column] != 0:
                                    towers.eraseTower(row, column)
                                    Game.placedTower = 1
                        updateUnderMouse(map, towerBar, towers)

                    # Inside Menu
                    elif column >= mapWidth:
                        menu.onClick(pos, map)

                    # Inside Tower Bar
                    else:
                        towerBar.onClick(pos)

                ## Keyboard Events
                elif event.type == pygame.KEYDOWN:
                    if event.key == pygame.K_ESCAPE:
                        Game.state = STATE_INITGAMEMENU
                    elif event.key == pygame.K_SPACE:
                        if Game.state == STATE_GAME:
                            Game.redrawSPBtn = 1
                            Game.state = STATE_PREPARATION
                        elif Game.state == STATE_PREPARATION:
                            Game.redrawSPBtn = 2
                            Game.state = STATE_GAME
                    elif event.key == pygame.K_1:
                        if 0 in Game.level.levelTowers:
                            towerBar.selectTower(0)
                    elif event.key == pygame.K_2:
                        if 1 in Game.level.levelTowers:
                            towerBar.selectTower(1)
                    elif event.key == pygame.K_3:
                        if 2 in Game.level.levelTowers:
                            towerBar.selectTower(2)
                    elif event.key == pygame.K_4:
                        if 3 in Game.level.levelTowers:
                            towerBar.selectTower(3)
                    elif event.key == pygame.K_5:
                        if 4 in Game.level.levelTowers:
                            towerBar.selectTower(4)
                    elif event.key == pygame.K_6:
                        if 5 in Game.level.levelTowers:
                            towerBar.selectTower(5)
                    elif event.key == pygame.K_EQUALS:
                        Game.increaseSpeed(menu)
                    elif event.key == pygame.K_MINUS:
                        Game.reduceSpeed(menu)
                    elif event.key == pygame.K_c:
                        towers.resetCooldowns()
                    elif event.key == pygame.K_r:
                        towerBar.selectTower(TowerUPGRADE)
                    elif event.key == pygame.K_e:
                        towerBar.selectTower(TowerERASE)
                    elif event.key == pygame.K_a:
                        towers.clear()
                        wave.clear()
                        shots.clear()
                    elif event.key == pygame.K_t:
                        towers.clear()
                        shots.clear()
                    elif event.key == pygame.K_v:
                        Evaluate.evalPlayerPosition()
                    elif event.key == pygame.K_b:
                        Progress.prog()

            ## Init Game
            if Game.state == STATE_LOADGAME:
                Game.redrawSPBtn = 1
                Game.state = STATE_PREPARATION
                screen.blit(InterfaceBGopaque, (0, 0))
                # screen.fill(background)
                # pygame.draw.rect(screen, background, ([mapWidth*tileSize, 0, rightMenuSize, mapHeight*tileSize + bottomMenuSize]))
                # pygame.draw.rect(screen, background, ([0, mapHeight*tileSize, mapWidth*tileSize, bottomMenuSize]))

                # Draw the map
                drawMap(map, screen)

                # Draw the game information menu
                menu.draw(screen)
                Game.redrawSPBtn = 1
                menu.drawSPBtn(screen)

                towerBar.moneyUpdated(screen)
                towerBar.showTower(screen)
                towerBar.updateTowerCost = 0

                # Draw the tower bar ~ 0
                towerBar.draw(screen)

                pygame.display.flip()

            ## Game Paused
            if Game.state == STATE_PREPARATION:
                if Game.autoMode or Game.balanceMode:
                    Game.redrawSPBtn = 1
                    Game.state = STATE_GAME
                else:
                    ## Display
                    drawTick += 1
                    # print(clock.get_fps())
                    if drawTick >= clock.get_fps() / 24:
                        # print(clock.get_fps())
                        drawTick = 0
                        drawGame(map, towerBar, towers, wave, shots, menu, screen, layer)

            ## Game Running
            elif Game.state == STATE_GAME:
                # Spawn any new enemy in the wave queue
                wave.spawn()
                # Move the enemies
                wave.move()
                # Tower target
                towers.target(shots)

                ## Display
                if not Game.autoMode and not Game.balanceMode:
                    drawTick += 1
                    # print(clock.get_fps())
                    if drawTick >= clock.get_fps() / 24:
                        # print(clock.get_fps())
                        drawTick = 0
                        drawGame(map, towerBar, towers, wave, shots, menu, screen, layer)

        # Limit to 24 frames per second
        # print(pygame.time.get_ticks())
        clock.tick(Game.framesPerSecond * Game.speedModifier)

    pygame.quit()
Example #37
def message():
    try:
        return render_template('resume.html',
                               message=DataManager.GetRepoInfo.GetRepoInfo(session['message']),
                               message_user=session['message'],
                               message_score=Evaluate.get_score(session['message']))
    except EnvironmentError:
        return render_template('index.html')
Example #38
        data = newmodels.Models(data)
        joblib.dump(data, "trained.data")
        print 'Done with stage:', stage
        stage = 'test'

    if stage == 'test':
        models = joblib.load("trained.data", "r")
        testData = newparser.InsultParser(testingDataFile, isTestDataLabelled)
        evaluation = Evaluate.Evaluate(testData, models)
        ypred = evaluation.predictions
        print ypred
        if isTestDataLabelled == True:
            ytrue = testData.y
            print len(ytrue), len(ypred)
            fpr, tpr, thresholds = sklearn.metrics.roc_curve(ytrue, ypred)
            print fpr, tpr, thresholds
            print sklearn.metrics.auc(fpr, tpr)

            incorrect = Evaluate.crossValidate(ypred, ytrue)
            for key in incorrect:
                print key+2, ypred[key], ytrue[key], testData.X[key]
            Evaluate.crossValidate(evaluation.mainModelPredictions, ytrue)
            #incorrect=Evaluate.crossValidate(evaluation.invIdxModelPredictions, ytrue)
            #Evaluate.crossValidate(evaluation.distanceBasedClassifier, ytrue)

        else:
            Writer.writeCSVFile('insult_output.csv', ypred, testingDataFile)
        print 'Done with stage:', stage
        stage = 'done'

Example #39
    def initialize(self, brain, node, args):
        super(WeightedPathway,self).initialize(brain, node, args)
        self.get_connection_ranges(node)
                
        connectivity = self.safely_get(node, '__connectivity', 'string')
        

        if connectivity == 'probability':
            probability = self.safely_get(node, '__probability', 'float')
        elif connectivity == 'formula':
            distribution = self.safely_get(node, '__formula', 'string')
            formula = self.construct_formula_single(distribution)
        elif connectivity == 'file':
            from_file = self.safely_get(node, '__file', 'string')
            print('./files/' + from_file)
            try:
                conn_matrix = np.loadtxt('./files/'+from_file)
            except:
                where = self.get_where()
                what = 'Error loading connectivity file: ./files/'+ from_file
                raise BSException(where, what)
            mtx_size = conn_matrix.shape
            required = (self.postLast-self.postFirst+1, self.preLast-self.preFirst+1)
            if mtx_size[1] < required[1] or mtx_size[0] < required[0]:
                where = self.get_where()
                what = 'Connectivity file too small: ' + str(mtx_size) + ' requires ' + str(required)
                raise BSException(where, what)
        elif connectivity == 'topographic' and self.inputs != self.outputs:
            where = self.get_where()
            what = 'Number of inputs and outputs must be the same for topographic connection'
            raise BSException(where, what)



        target_size = self.postLast-self.postFirst
        
        formulas = self.construct_formulas(node) 
        sources =range(self.preFirst, self.preLast+1)

        col = 0
        src=[]
        trg=[]
        for n in sources:
            targets=[]            
            if connectivity == 'full':
                targets += range(self.postFirst, self.postLast)
            elif connectivity == 'probability':
                targets += random.sample(range(self.postFirst, self.postLast), int(probability*target_size))   
            elif connectivity == 'formula':
                for t in range(self.postFirst, self.postLast):             
                    try:          
                        result = Evaluate.evaluate_single(formula)                 
                    except:
                        where = self.get_where()
                        what = 'Error evaluating distribution: ' + distribution + ' -> ' + formula
                        raise BSException(where, what)
                    if result:
                        targets.append(t)
            elif connectivity == 'file': 
                c=0
                for t in range(self.postFirst, self.postLast):
                    if conn_matrix[c,col] != 0:
                        targets.append(t)
                    c+=1
            elif connectivity == 'topographic':
                targets += [self.postFirst+col]
                
            
            src+=[n]*len(targets)
            trg+=targets
            col+=1
            
        if len(targets) > 0: 
            sa = self.evaluate_formulas_batch(formulas, node, self.attributes,dict(), dict(), len(trg))                                   
            self.add_synapses(brain, src, trg, sa)