Example #1
def main(job_id, params):
    try:
        return evaluate(job_id, params, conf)
    except Exception as ex:
        print(ex)
        print('An error occurred in stdp.py')
        return np.nan
Example #2
def main(job_id, params):
    for p in params:
        params[p] = params[p][0]
    try:
        return evaluate(job_id, params, conf, jobs=2)[conf['criterion_name']]
    except Exception as ex:
        print(ex)
        print('An error occurred in stdp.py')
        return np.nan
Example #3
def refit_and_save_model(dataset, transformer_list, score_func_list,
                         num_attr_list, classifier_list, scoring, **kwargs):
    """
    This function fits transformers, best score func and best classifier on dataset.X_train
    and persist into different files: transformers, best score function and best classifier.
    File names are of the form name + '. pkl' where name is taken from the configuration.
    :param dataset: dataset
    :param transformers: list of tuples ('transformer_name', transformer object)
    :param score_funcs: list of tuples ('score_func_name', score_func object)
    :param num_attr: list with number of attributes
    :param classifiers: list of tuples: ('cls_name', classifier objects)
    :param scoring: array with shape(len(classifiers), len(num_attr), len(score_funcs)). Each positions has the score
    for each tuple (classifier, num_attr, score_func)
    :return: None
    """
    i, j, z = np.unravel_index(scoring.argmax(), scoring.shape)
    # fit and persist transformers
    # no test split here: X_test and y_test are None, the transformers are fit on all instances
    X_train, _, y_train, _, X_names = process_transformers(
        dataset, transformer_list, k_train=range(0, dataset.n_instances))
    persist_objects(
        zip(*transformer_list)[0],
        zip(*transformer_list)[1],
        join(dataset.configuration['data_path'], 'model'))

    # fit and persist score func
    X_trunc_train, X_trunc_test, X_trunc_names = features_selection(
        score_func_list[z][1],
        num_attr_list[j],
        X_names,
        X_test=None,
        X_train=X_train,
        y_train=y_train)
    persist_objects(
        zip(*[score_func_list[z]])[0],
        zip(*[score_func_list[z]])[1],
        join(dataset.configuration['data_path'], 'model'))

    save_data_transformed(X_trunc_train, y_train, X_trunc_names, **kwargs)

    # fit and persist classifier
    evaluate(classifier_list[i][1],
             X_test=None,
             y_test=None,
             X_train=X_trunc_train,
             y_train=y_train)
    persist_objects(
        zip(*[classifier_list[i]])[0],
        zip(*[classifier_list[i]])[1],
        join(dataset.configuration['data_path'], 'model'))
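For orientation, here is a minimal, self-contained illustration (with a made-up scoring array, not taken from the project) of how np.unravel_index recovers the (i, j, z) indices of the best (classifier, num_attr, score_func) combination, as done at the top of refit_and_save_model:

import numpy as np

# Hypothetical scoring array: 2 classifiers x 3 attribute counts x 2 score functions
scoring = np.array([[[0.70, 0.65], [0.72, 0.80], [0.60, 0.75]],
                    [[0.68, 0.66], [0.71, 0.74], [0.69, 0.73]]])

# argmax() returns the flat index of the best score; unravel_index maps it back
# to (classifier, num_attr, score_func) coordinates.
i, j, z = np.unravel_index(scoring.argmax(), scoring.shape)
print(i, j, z)  # -> 0 1 1 (the 0.80 entry)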
Example #4
	def process(self, train, test, test_index):
		self.__init_matrix(train, test, test_index)

		iteration = 1
		diff = 100
		js = 0
		res = []
		
		while (diff > self.threshold or iteration < 10) and (iteration <= 300):
			istart = time.clock()
		
			u, v = self.__calc_derivative()
			self.u_matrix -= self.alpha * u
			self.v_matrix -= self.alpha * v
			self.uv_matrix = np.dot(self.u_matrix, self.v_matrix.T)

			jsn = self.__calc_target()
			diff = abs(jsn - js) / jsn
			js = jsn
			rmse = evaluate(self.uv_matrix, self.test, self.test_index)
			res.append((iteration, jsn, rmse))
			iend = time.clock()
			print "[log] k = %d lambda= %0.4f" % (self.k, self.lamb)
			print """[log] now %d iterations: \n[log] js = %.4f \n[LOG] diff = %.4f\n[LOG] rmse = %.5f\n[log] this iteration time spent %.2f seconds\n""" \
				% (iteration, jsn, diff, rmse, (iend - istart))
			iteration += 1

		return res
Example #5
def add_results(results, possible_moves, start, orig_board):
    '''
        + Given a sequence like "RrlL", this function interprets the sequence as actual moves and kills.
        + R: down-right
        + r: up-right
        + l: up-left
        + L: down-left
        + Inputs:
            + results: list of one or more sequences
            + possible_moves: list to which new possible moves are appended
            + start: this piece's initial position
            + orig_board: the original board, needed for tracking king moves
        + Outputs: None; results are appended to the possible_moves reference

        + The logic is simple: update the start position and the board for each sequence character (R, r, L, l).
    '''
    for result in results:
        possible_move = [start]
        board = deepcopy(orig_board)
        # cost = 0
        i, j = start[0], start[1]
        for char in result:
            if char == '' or char == ' ':
                continue
            if char == 'R':
                board[i + 1][j + 1] = 0  #kill it
                board[i + 2][j + 2] = board[i][j]  #move me
                board[i][j] = 0  #from there
                # cost += 1
                i += 2
                j += 2
            elif char == 'L':
                board[i + 1][j - 1] = 0  #kill it
                board[i + 2][j - 2] = board[i][j]  #move me
                board[i][j] = 0  #from there
                # cost += 1
                i += 2
                j -= 2
            elif char == 'r':
                board[i - 1][j + 1] = 0  #kill it
                board[i - 2][j + 2] = board[i][j]  #move me
                board[i][j] = 0  #from there
                # cost += 1
                i -= 2
                j += 2
            elif char == 'l':
                board[i - 1][j - 1] = 0  #kill it
                board[i - 2][j - 2] = board[i][j]  #move me
                board[i][j] = 0  #from there
                # cost += 1
                i -= 2
                j -= 2

        possible_move.append((i, j))
        possible_move.append(evaluate(board))
        possible_move.append(board)
        possible_moves.append(possible_move)
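As a side note, the per-character updates above can be summarised by a small offset table: the captured piece sits one diagonal step from the current square and the moving piece lands two steps away. The helper below is purely illustrative and is not part of the original module:

# Hypothetical helper mirroring the per-character updates in add_results.
# char -> (row step, column step); capture at 1 step, landing at 2 steps.
STEPS = {'R': (1, 1), 'L': (1, -1), 'r': (-1, 1), 'l': (-1, -1)}

def apply_char(board, i, j, char):
    di, dj = STEPS[char]
    board[i + di][j + dj] = 0                      # remove the captured piece
    board[i + 2 * di][j + 2 * dj] = board[i][j]    # land two diagonal steps away
    board[i][j] = 0                                # vacate the starting square
    return i + 2 * di, j + 2 * dj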
Example #6
def run_model() -> None:
    "Execute model according to the configuration"
    print('#' * 5, 'PARAMETERS', '#' * 5)
    print_args(ARGS)
    print('#' * 10, '\n\n')

    # Which model to use?
    build_fn, reader_type = common.get_modelfn_reader()
    reader = common.create_reader(reader_type)

    def optimiser(model: Model) -> torch.optim.Optimizer:
        return AdamW(model.parameters(), lr=1e-3, weight_decay=1e-3)

    # Create SAVE_FOLDER if it doesn't exist
    ARGS.SAVE_PATH.mkdir(exist_ok=True, parents=True)
    train_dataset = load_data(data_path=ARGS.TRAIN_DATA_PATH,
                              reader=reader,
                              pre_processed_path=ARGS.TRAIN_PREPROCESSED_PATH)
    val_dataset = load_data(data_path=ARGS.VAL_DATA_PATH,
                            reader=reader,
                            pre_processed_path=ARGS.VAL_PREPROCESSED_PATH)
    test_dataset = load_data(data_path=ARGS.TEST_DATA_PATH,
                             reader=reader,
                             pre_processed_path=ARGS.TEST_PREPROCESSED_PATH)

    model = train_model(build_fn,
                        train_data=train_dataset,
                        val_data=val_dataset,
                        test_data=test_dataset,
                        save_path=ARGS.SAVE_PATH,
                        num_epochs=ARGS.NUM_EPOCHS,
                        batch_size=ARGS.BATCH_SIZE,
                        optimiser_fn=optimiser,
                        cuda_device=ARGS.CUDA_DEVICE,
                        sorting_keys=reader.keys)

    common.evaluate(model, reader, test_dataset)
    result = make_prediction(model, reader, verbose=False)
    common.error_analysis(model, test_dataset)

    print('Save path', ARGS.SAVE_PATH)

    cuda_device = 0 if is_cuda(model) else -1
    test_load(build_fn, reader, ARGS.SAVE_PATH, result, cuda_device)
Example #7
def eval(x, id, ret_q):
    p = dict(zip(var_names, x))
    for param in p:
        p[param] = scale_from_cma(p[param], cma_conf[param]["min"], cma_conf[param]['max'], bounds[0], bounds[1])
#    p["size"] = int(p["size"])
    try:
        res = evaluate(id, p, conf, jobs, verbose=False)
        ret = res[ conf['criterion_name'] ]
        ret_q.put(ret)
    except Exception as ex:        
        ret_q.put(ex)
Example #8
def eval(x, id, ret_q):
    p = dict(zip(var_names, x))
    for param in p:
        p[param] = scale_from_cma(p[param], cma_conf[param]["min"], cma_conf[param]['max'], bounds[0], bounds[1])
#    p["size"] = int(p["size"])
    try:
        res = evaluate(id, p, conf, jobs, verbose=False)
        ret = res[ conf['criterion_name'] ]
        ret_q.put(ret)
    except Exception as ex:        
        ret_q.put(ex)
Example #9
def main(unused_argv):
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        level=logging.DEBUG if FLAGS.debug else logging.INFO)

    (train_xs, train_ys), (test_xs, test_ys), (dev_xs, dev_ys) \
        = load_bibtex('bibtex-train.arff', 'bibtex-test.arff')

    _, init_params = init_mlp(npr.PRNGKey(FLAGS.random_seed),
                              (FLAGS.batch_size, INPUTS))

    opt_init, opt_update, get_params = adam(0.001)
    # opt_init, opt_update, get_params = momentum(0.001, 0.9)
    # opt_init, opt_update, get_params = sgd(0.001)
    opt_state = opt_init(init_params)

    @jit
    def update(i, opt_state, batch):
        params = get_params(opt_state)
        loss, g = value_and_grad(cross_entropy_loss)(params, *batch)
        return opt_update(i, g, opt_state), loss

    num_batches = int(onp.ceil(len(train_xs) / FLAGS.batch_size))
    train_stream = data_stream(train_xs,
                               train_ys,
                               batch_size=FLAGS.batch_size,
                               random_seed=FLAGS.random_seed,
                               infty=True)
    itercount = itertools.count()
    best_f1 = 0.
    for epoch in range(FLAGS.epochs):
        step_loss = 0.
        for _ in tqdm(range(num_batches)):
            opt_state, loss = update(next(itercount), opt_state,
                                     next(train_stream))
            step_loss += loss
        logger.info(f'epoch: {epoch} loss = {step_loss / num_batches}')
        f1 = evaluate(get_params(opt_state),
                      inference,
                      test_xs,
                      test_ys,
                      batch_size=FLAGS.batch_size,
                      threshold=0.5)
        if f1 > best_f1:
            best_f1 = f1
    logger.info(f'best F1 score = {best_f1}')
Example #10
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "dataset_name",
        help="Dataset. Options are: [twitter|webcrawl|peru|lleida]")
    args = parser.parse_args()

    # Get the configuration module according to the dataset
    configuration = load_configuration(args.dataset_name + '_cfg')
    # Create and load the dataset based on its configuration
    dataset = configuration['dataset_class'](configuration)
    dataset.load_dataset()

    transformers = zip(
        dataset.configuration['src_transformer'],
        load_objects(dataset.configuration['src_transformer'],
                     join(dataset.configuration['data_path'], 'model')))
    score_func = load_objects([dataset.configuration['score_func']],
                              join(dataset.configuration['data_path'],
                                   'model'))
    classifier = load_objects([dataset.configuration['classifier']],
                              join(dataset.configuration['data_path'],
                                   'model'))

    show_conf_matrix = ('show_conf_matrix' in configuration
                        and configuration['show_conf_matrix']) or False

    _, X_test, _, y_test, X_names = process_transformers(
        dataset, transformers, None, range(0, dataset.n_instances))
    _, X_trunc_test, X_trunc_names = features_selection(
        score_func[0], None, X_names, X_test, None, None)

    # if save_data then we save X_trunc_test and y_test into file
    save_data_transformed(X_trunc_test, y_test, X_trunc_names, configuration)

    score = evaluate(classifier[0], X_trunc_test, y_test, None, None,
                     show_conf_matrix, dataset.get_evaluation_function(),
                     **dataset.get_evaluation_function_args())
    print(score)
Example #11
def main(job_id, params):
    return evaluate(job_id, params, conf)
Example #12
import math
from common import evaluate, Data

bidAmount = 110


def getBidPrice(bid):
    return bidAmount


print('Constant Bid: %d' % bidAmount)
evaluate('dataset/validation.csv', getBidPrice)
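Illustrative only: any strategy with the same getBidPrice(bid) signature can be passed to evaluate. A hypothetical random-bid baseline, for comparison with the constant bid above:

import random

def getRandomBidPrice(bid):
    # Bid a uniform random amount around the constant baseline above.
    return random.randint(80, 140)

# evaluate('dataset/validation.csv', getRandomBidPrice)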
Example #13
def construct_full_tree(board, pl):
    '''
        + The main part starts here.
        + This function constructs nodes; a node represents a state.
        + A state is the board after certain moves.
        + This function returns the min-max tree, with pruning if asked for.
        + The algorithm keeps creating nodes as long as the last depth (level) of nodes
            did not exceed the time limit while being created.
        + Inputs: board and a player
        + Outputs: tree
    '''
    print ("THINKING...This might take awhile.")
    #Crete the Root Node, add it to the tree and the Q
    player = deepcopy(pl)       #just to make sure, no shallow copy occurs
    root = Node(board,player,evaluate(board))   #the root
    tree = Tree(root)
    tree.inc_depth()
    switch = "switch"
    Q = deque()                 #queue for nodes to be expanded later
    Q.append(root)              #append the root of course
    Q.append(switch)            #switch: a new generation is coming, i.e. new level, new depth
    new_gen = True              #for tree construction
    this_is_root = True
    start_time, end_time = None, None
    
    while len(Q) > 1:
        
        root = Q.popleft()
        if root == "switch":
            #swap players
            player = 1 if player == -1 else -1
            Q.append(switch)
            end_time = time.time()

            if start_time is None:
                tree.inc_depth()
            elif end_time - start_time < TUNE:
                if VERBOSE: print (f'Tree depth till now: {tree.depth}\t\tTime: {end_time-start_time}')
                tree.inc_depth()
                #after each level produced, prune if you want to.
                if PRUNE: tree.prune()
                if VERBOSE_DEEP: tree.print_tree()
            else:
                print (f"Time Exceeded at depth: {tree.depth}\t\tTime: {end_time-start_time}")

                return tree
            start_time = time.time()
            new_gen = True
            #this will never happen, unless you have a SUPER computer and N is like 1e4
            if tree.depth == DEPTH:
                break
            continue
        
        if root.pruned:
            if VERBOSE_DEEP: print ("Can't Go Down there, it's pruned")
            continue

        #Get the possible moves from this Node
        possible_moves = where_can_i_move_next(root.board, player)
        
        #No moves from here
        if len(possible_moves) == 0:
            #cost will be massive, cuz this is a WIN
            new_cost = MAX_POS if player == 1 else MAX_NEG
            root.update_cost(new_cost)

        for pos in possible_moves:
            #for each possible move create and append a node
            node = Node(pos[3], player, pos[2])
            tree.append_node(node, root, new_gen)
            new_gen = False
            player_swap = 1 if player == -1 else -1
            score = how_many(pos[3], player_swap)
            if score == 0:
                new_cost = MAX_POS if player == 1 else MAX_NEG
                node.update_cost(new_cost)
            if len(possible_moves) == 1 and this_is_root:
                Q.append(node)
                return tree
            else:
                Q.append(node)
        this_is_root = False

    return tree
Example #14
	def _evaluate(self, p_matrix, test, test_index):
		return evaluate(p_matrix, test, test_index)
Example #15
def main(unused_argv):
    logging.basicConfig(
        format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
        level=logging.DEBUG if FLAGS.debug else logging.INFO)

    (train_xs, train_ys), (test_xs, test_ys), (dev_xs, dev_ys) \
        = load_bibtex('bibtex-train.arff', 'bibtex-test.arff')

    init_params = init_param(npr.PRNGKey(FLAGS.random_seed),
                             input_units=INPUTS,
                             label_size=LABELS,
                             feature_size=FLAGS.feature_size,
                             label_units=FLAGS.label_units,
                             hidden_units=FLAGS.hidden_units)

    @jit
    def update_pretrain(i, opt_state, batch):
        params = get_params(opt_state)
        loss, g = value_and_grad(pretrain_loss)(params, *batch)
        return opt_update(i, g, opt_state), loss

    def update_ssvm(i, opt_state, batch, pretrain_global_energy=False):
        params = get_params(opt_state)
        loss, g = value_and_grad(ssvm_loss)(
            params, *batch, pretrain_global_energy=pretrain_global_energy)
        return opt_update(i, g, opt_state), loss

    stages = [
        Config(batch_size=FLAGS.pretrain_batch_size,
               epochs=FLAGS.pretrain_epoch,
               update_fun=update_pretrain,
               inference_fun=inference_pretrained,
               optimizer=momentum(0.001, 0.95),
               msg='pretraining feature network'),
        Config(batch_size=FLAGS.ssvm_batch_size,
               epochs=FLAGS.energy_pretrain_epoch,
               update_fun=partial(update_ssvm, pretrain_global_energy=True),
               inference_fun=inference,
               optimizer=momentum(0.001, 0.95),
               msg='pretraining energy network'),
        Config(batch_size=FLAGS.ssvm_batch_size,
               epochs=FLAGS.e2e_train_epoch,
               update_fun=update_ssvm,
               inference_fun=inference,
               optimizer=momentum(0.001, 0.95),
               msg='finetune the entire network end-to-end')
    ]
    best_f1 = 0.
    params = init_params
    for stage in stages:
        opt_init, opt_update, get_params = stage.optimizer
        opt_state = opt_init(params)
        logger.info(stage.msg)
        num_batches = int(onp.ceil(len(train_xs) / stage.batch_size))
        train_stream = data_stream(train_xs,
                                   train_ys,
                                   batch_size=stage.batch_size,
                                   random_seed=FLAGS.random_seed,
                                   infty=True)
        itercount = itertools.count()
        for epoch in range(stage.epochs):
            step_loss = 0.
            for _ in tqdm(range(num_batches)):
                opt_state, loss = stage.update_fun(next(itercount), opt_state,
                                                   next(train_stream))
                step_loss += loss
            logger.info(f'epoch: {epoch} loss = {step_loss / num_batches}')
            f1 = evaluate(get_params(opt_state),
                          stage.inference_fun,
                          test_xs,
                          test_ys,
                          batch_size=stage.batch_size)
            if f1 > best_f1:
                best_f1 = f1
        params = get_params(opt_state)
    logger.info(f'best F1 score = {best_f1}')
Example #16
"""
Calculate the STOI score
"""

import common
from pystoi.stoi import stoi as _stoi

stoi = common.evaluate(_stoi)
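common.evaluate itself is not shown in this listing; from its use here and in the PESQ example at the end, it appears to turn a raw metric function into a ready-to-use scorer. A purely hypothetical sketch of such a wrapper (the real common module may differ):

def evaluate(metric_fn):
    # Hypothetical wrapper: score each (reference, estimate) pair and average.
    def wrapped(references, estimates, rate):
        scores = [metric_fn(ref, est, rate) for ref, est in zip(references, estimates)]
        return sum(scores) / len(scores)
    return wrapped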
Example #17
def where_can_i_move_next(board, player=1, verbose=False):
    '''
        + Given a player (1/-1) and the board settings, decide which pieces can move next.
        + Returns: list of possible moves
            + possible_move: [tuple of start, tuple of end, how_many_killed, new_board]

        + The logic is divided into groups:
        + Group 1: for player 1, divided into two sections
            + section 1: for soldiers and kings, check if they can move down-left or down-right
                + also checks whether they can chain multiple moves and kills
            + section 2: for kings only, check if they can move up-left or up-right
                + also checks whether they can chain multiple moves
        + Group 2: the same for player 2
    '''
    #where the moves are stored
    possible_moves = []
    count_pieces = non_zeros_count(board)
    #loop through the entire board for player 1/-1
    for i in range(N):
        for j in range(N):
            #empty square, nothing to move here
            if board[i][j] == 0:
                continue

            #positive: player 1, whether soldier or king
            if board[i][j] >= 1 and player == 1:
                #player 1 soldier or king
                if (i + 1 < N) and (j - 1 >= 0):
                    #not out of bound
                    if (board[i + 1][j - 1] == 0):
                        #can move left down
                        new_board = deepcopy(board)
                        new_board[i + 1][j - 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i + 1][j - 1] = board[i][j]  #there
                        possible_moves.append([(i, j), (i + 1, j - 1),
                                               evaluate(new_board), new_board])

                    #sequence of moves may occur
                    elif (board[i + 1][j - 1] <= -1) and (
                            i + 2 < N
                            and j - 2 >= 0) and board[i + 2][j - 2] == 0:
                        new_board = deepcopy(board)
                        new_board[i + 1][j - 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i + 2][j - 2] = board[i][j]  #there
                        results = []
                        sequence_of_moves(new_board, (i, j), (i + 2, j - 2),
                                          board[i][j], results, 'L')
                        new_board = deepcopy(board)
                        add_results(results, possible_moves, (i, j), new_board)

                if (i + 1 < N) and (j + 1 < N):
                    if (board[i + 1][j + 1] == 0):
                        #not out of bound, and free move "right" down
                        new_board = deepcopy(board)
                        new_board[i + 1][j + 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i + 1][j + 1] = board[i][j]  #there
                        possible_moves.append([(i, j), (i + 1, j + 1),
                                               evaluate(new_board), new_board])

                    elif (board[i + 1][j + 1] <=
                          -1) and (i + 2 < N
                                   and j + 2 < N) and board[i + 2][j + 2] == 0:
                        new_board = deepcopy(board)
                        new_board[i + 1][j + 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i + 2][j + 2] = board[i][j]  #there
                        results = []
                        sequence_of_moves(new_board, (i, j), (i + 2, j + 2),
                                          board[i][j], results, 'R')
                        new_board = deepcopy(board)
                        add_results(results, possible_moves, (i, j), new_board)

            if board[i][j] == 2 and player == 1:
                ######### player 1 king ONLY#######
                #check if it can go up!
                if (i - 1 >= 0) and (j - 1 >= 0):
                    #not out of bound
                    if (board[i - 1][j - 1] == 0):
                        #can move left
                        new_board = deepcopy(board)
                        new_board[i - 1][j - 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i - 1][j - 1] = board[i][j]  #there
                        possible_moves.append([(i, j), (i - 1, j - 1),
                                               evaluate(new_board), new_board])

                    #sequence of moves may occur
                    elif (board[i - 1][j - 1] <= -1) and (
                            i - 2 >= 0
                            and j - 2 >= 0) and board[i - 2][j - 2] == 0:
                        new_board = deepcopy(board)
                        new_board[i - 1][j - 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i - 2][j - 2] = board[i][j]  #there
                        results = []
                        sequence_of_moves(new_board, (i, j), (i - 2, j - 2),
                                          board[i][j], results, 'l')
                        new_board = deepcopy(board)
                        add_results(results, possible_moves, (i, j), new_board)

                if (i - 1 >= 0) and (j + 1 < N):
                    if (board[i - 1][j + 1] == 0):
                        #not out of bound, and free move "right" up
                        new_board = deepcopy(board)
                        new_board[i - 1][j + 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i - 1][j + 1] = board[i][j]  #there
                        possible_moves.append([(i, j), (i - 1, j + 1),
                                               evaluate(new_board), new_board])

                    elif (board[i - 1][j + 1] <=
                          -1) and (i - 2 >= 0
                                   and j + 2 < N) and board[i - 2][j + 2] == 0:
                        new_board = deepcopy(board)
                        new_board[i - 1][j + 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i - 2][j + 2] = board[i][j]  #there
                        results = []
                        sequence_of_moves(new_board, (i, j), (i - 2, j + 2),
                                          board[i][j], results, 'r')
                        new_board = deepcopy(board)
                        add_results(results, possible_moves, (i, j), new_board)

            #negative: player 2, whether soldier or king
            if board[i][j] <= -1 and player == -1:
                #player 2 soldier or king
                # LEFT UP
                if (i - 1 >= 0) and (j - 1 >= 0):
                    #not out of bound
                    if (board[i - 1][j - 1] == 0):
                        #can move left up
                        new_board = deepcopy(board)
                        new_board[i - 1][j - 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i - 1][j - 1] = board[i][j]  #there
                        possible_moves.append([(i, j), (i - 1, j - 1),
                                               evaluate(new_board), new_board])

                    #sequence of moves may occur
                    elif (board[i - 1][j - 1] >=
                          1) and (i - 2 >= 0
                                  and j - 2 >= 0) and board[i - 2][j - 2] == 0:
                        new_board = deepcopy(board)
                        new_board[i - 1][j - 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i - 2][j - 2] = board[i][j]  #there
                        results = []
                        sequence_of_moves(new_board, (i, j), (i - 2, j - 2),
                                          board[i][j], results, 'l')
                        new_board = deepcopy(board)
                        add_results(results, possible_moves, (i, j), new_board)
                # Right Up
                if (i - 1 >= 0) and (j + 1 < N):
                    if (board[i - 1][j + 1] == 0):
                        #not out of bound, and free move "right" up
                        new_board = deepcopy(board)
                        new_board[i - 1][j + 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i - 1][j + 1] = board[i][j]  #there
                        possible_moves.append([(i, j), (i - 1, j + 1),
                                               evaluate(new_board), new_board])

                    elif (board[i - 1][j + 1] >=
                          1) and (i - 2 >= 0
                                  and j + 2 < N) and board[i - 2][j + 2] == 0:
                        new_board = deepcopy(board)
                        new_board[i - 1][j + 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i - 2][j + 2] = board[i][j]  #there
                        results = []
                        sequence_of_moves(new_board, (i, j), (i - 2, j + 2),
                                          board[i][j], results, 'r')
                        new_board = deepcopy(board)
                        add_results(results, possible_moves, (i, j), new_board)

            if board[i][j] == -2 and player == -1:
                ######### player 2 king ONLY#######
                #check if it can go DOWN!
                if (i + 1 < N) and (j - 1 >= 0):
                    #not out of bound
                    if (board[i + 1][j - 1] == 0):
                        #can move left
                        new_board = deepcopy(board)
                        new_board[i + 1][j - 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i + 1][j - 1] = board[i][j]  #there
                        possible_moves.append([(i, j), (i + 1, j - 1),
                                               evaluate(new_board), new_board])

                    #sequence of moves may occur
                    elif (board[i + 1][j - 1] >=
                          1) and (i + 2 < N
                                  and j - 2 >= 0) and board[i + 2][j - 2] == 0:
                        new_board = deepcopy(board)
                        new_board[i + 1][j - 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i + 2][j - 2] = board[i][j]  #there
                        results = []
                        sequence_of_moves(new_board, (i, j), (i + 2, j - 2),
                                          board[i][j], results, 'L')
                        new_board = deepcopy(board)
                        add_results(results, possible_moves, (i, j), new_board)

                if (i + 1 < N) and (j + 1 < N):
                    if (board[i + 1][j + 1] == 0):
                        #not out of bound, and free move "right" down
                        new_board = deepcopy(board)
                        new_board[i + 1][j + 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i + 1][j + 1] = board[i][j]  #there
                        possible_moves.append([(i, j), (i + 1, j + 1),
                                               evaluate(new_board), new_board])

                    elif (board[i + 1][j + 1] >=
                          1) and (i + 2 < N
                                  and j + 2 < N) and board[i + 2][j + 2] == 0:
                        new_board = deepcopy(board)
                        new_board[i + 1][j + 1] = 0  #killed it
                        new_board[i][j] = 0  #move me
                        new_board[i + 2][j + 2] = board[i][j]  #there
                        results = []
                        sequence_of_moves(new_board, (i, j), (i + 2, j + 2),
                                          board[i][j], results, 'R')
                        new_board = deepcopy(board)
                        add_results(results, possible_moves, (i, j), new_board)

            #else is zero .. skip .. no elses btw.

    # Only forced moves are allowed, which occurs when a capture move is possible.
    forced = False
    # Collect the possible forced moves allowed here
    possible_moves_forced = []
    for pos in possible_moves:
        if non_zeros_count(
                pos[3]) < count_pieces:  #then for sure, some gotta be eaten!
            possible_moves_forced.append(pos)
            forced = True
    # Swap in the forced moves if any were found.
    if forced:
        del possible_moves
        possible_moves = possible_moves_forced
    # Create Kings
    for pos in possible_moves:
        i, j = pos[1][0], pos[1][1]
        if player == 1:
            if pos[3][i][j] == 1 and i == N - 1:
                pos[3][i][j] = 2
        elif player == -1:
            if pos[3][i][j] == -1 and i == 0:
                pos[3][i][j] = -2
    # Show results in graphics mode.
    if verbose:
        for poss in possible_moves:
            br = Board(N)
            br.draw_board(poss[3])

    return possible_moves
Example #18
def main(argv):
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "dataset_name",
        help="Dataset. Options are: [twitter|webcrawl|peru|lleida]")
    args = parser.parse_args()

    # Get the configuration module according to the dataset
    configuration = load_configuration(args.dataset_name + '_cfg')
    # Create and load the dataset based on its configuration
    dataset = configuration['dataset_class'](configuration)
    dataset.load_dataset()

    transformers = configuration['src_transformer']
    score_funcs = configuration['score_funcs']
    classifiers = configuration['classifiers']
    num_folds = ('num_folds' in configuration
                 and configuration['num_folds']) or DEFAULT_FOLDS
    show_conf_matrix = ('show_conf_matrix' in configuration
                        and configuration['show_conf_matrix']) or False

    num_attr = configuration['num_attr'] if 'num_attr' in configuration else []

    # Matrix of accuracies (rows are classifiers, columns are the numbers of features used to train)
    scoring = None
    for idx, kfold in enumerate(dataset.get_fold()):
        for k_train, k_test in kfold:
            X_train, X_test, y_train, y_test, X_names = process_transformers(
                dataset, transformers, k_train, k_test)

            # If there are elements in num_attr greater than the features in the dataset, use the number of features
            if len(num_attr) > 0:
                num_attr = list(
                    set([min(elem, X_train.shape[1]) for elem in num_attr]))
                num_attr.sort()
            else:
                num_attr.append(X_train.shape[1])

            if scoring is None:
                scoring = np.zeros(
                    (len(classifiers), len(num_attr), len(score_funcs)))

            # For each num_features, for each feature selector function, for each classifier: evaluate
            for k in range(0, len(score_funcs)):
                for j in range(0, len(num_attr)):
                    X_trunc_train, X_trunc_test, X_trunc_names = features_selection(
                        score_funcs[k][1], num_attr[j], X_names, X_test,
                        X_train, y_train)

                    print('_' * 80)
                    print('Evaluating classifiers:')
                    for i in range(0, len(classifiers)):
                        print(
                            "Classifier: %s (Num. features: %d) (Score funct: %s) ..."
                            % (classifiers[i][0], num_attr[j],
                               score_funcs[k][1].score_func.__name__))

                        score = evaluate(
                            classifiers[i][1], X_trunc_test, y_test,
                            X_trunc_train, y_train, show_conf_matrix,
                            dataset.get_evaluation_function(),
                            **dataset.get_evaluation_function_args())
                        # score for test fold
                        scoring[i, j, k] += score

    # idx + 1 == bootstrap
    process_results(dataset, transformers, score_funcs, num_attr, classifiers,
                    np.divide(scoring, (idx + 1) * num_folds), **configuration)
Example #19
 def evaluate(self, dataset):
     return evaluate(self.checkpoint.model, dataset)
Example #20
#Hyperparameters
if (ADAM_OPTIMISER):
    optimizer = optim.Adam(model.parameters(), lr=LEARNING_RATE)
else:
    optimizer = optim.SGD(model.classifier.parameters(),
                          lr=0.001,
                          momentum=0.5)

#Train
best_valid_loss = float('inf')
for epoch in range(EPOCHS):  #Range of Epochs
    print(epoch)
    train_loss, train_acc = common.train(model, device, train_iterator,
                                         optimizer,
                                         criterion)  #Train Loss Calculation
    valid_loss, valid_acc = common.evaluate(
        model, device, valid_iterator, criterion)  #Validation Loss Calculation

    if valid_loss < best_valid_loss:  #Validation Loss - Is current lower than the saved validation loss.
        best_valid_loss = valid_loss  #Save the best loss (lowest)
        torch.save(model.state_dict(), MODEL_SAVE_PATH)  #Save the model

    print(
        f'| Epoch: {epoch+1:02} | Train Loss: {train_loss:.3f} | Train Acc: {train_acc*100:05.2f}% | Val. Loss: {valid_loss:.3f} | Val. Acc: {valid_acc*100:05.2f}% |'
    )

#3. OUTPUT

model.load_state_dict(
    torch.load(MODEL_SAVE_PATH))  #Load best weights from file
test_loss, test_acc = common.evaluate(model, device, valid_iterator,
                                      criterion)  #Test Loss is dependent on
Example #21
 def _evaluate(self, p_matrix, test, test_index):
     return evaluate(p_matrix, test, test_index)
Example #22
def test(algorithm, resfolder='', longer=False):
    dir = '../base/MIR-1K/UndividedWavfile/' if longer else '../base/MIR-1K/Wavfile/'
    metrics = ''  # this also changes the output file name

    if longer:
        resfolder = '-long' + resfolder

    count = len(os.listdir(dir))
    results = np.empty((count, 6))
    songs = np.sort(os.listdir(dir))[0:count]
    i = 0

    algorithm = algorithm.upper()
    savedir = '../results{}/{}'.format(resfolder, algorithm)
    Path(savedir).mkdir(parents=True, exist_ok=True)
    for i in range(count):
        print('{}/{}'.format(i + 1, count))

        song = songs[i] = songs[i][:-4]
        rate, audiol, audior = load('{}/{}.wav'.format(dir, song))
        audio = audiol // 2 + audior // 2

        if 'PLCAL' in algorithm:
            voice, music = plca(
                audio, rate,
                len(audio) > 5,
                labels(song, segnuml if longer else segnums, longer))
        else:
            voice, music = apply(algorithm, audio, rate)
        '''if i < 10:
            save(music, rate, '{}/{}-music.wav'.format(savedir, name))
            save(voice, rate, '{}/{}-voice.wav'.format(savedir, name))'''

        # print('algorithm finished')

        sdr, sir, sar = evaluate(audior, audiol, voice, music)
        print("\033[1A\033[J", end='')  # brise prosli red
        print(
            'SDR: {0[0]:05.2f}(V)  {0[1]:05.2f}(M)  SIR: {1[0]:05.2f}(V)  {1[1]:05.2f}(M)  SAR: {2[0]:05.2f}(V)  {2[1]:05.2f}(M)'
            .format(sdr, sir, sar))
        results[i] = np.concatenate([sdr, sir, sar])

    mean = '\nMean:\nSDR: {0[0]:05.2f}(V)  {0[1]:05.2f}(M) \nSIR: {0[2]:05.2f}(V)  {0[3]:05.2f}(M) \nSAR: {0[4]:05.2f}(V)  {0[5]:05.2f}(M)'.format(
        np.mean(results, axis=0))
    median = ' \nMedian:\nSDR: {0[0]:05.2f}(V)  {0[1]:05.2f}(M) \nSIR: {0[2]:05.2f}(V)  {0[3]:05.2f}(M) \nSAR: {0[4]:05.2f}(V)  {0[5]:05.2f}(M)'.format(
        np.median(results, axis=0))
    maximum = (
        '\nMaximum:\nSDR: {0[0]:05.2f}[{1[0]:03}](V)  {0[1]:05.2f}[{1[1]:03}](M) \nSIR: {0[2]:05.2f}[{1[2]:03}](V)  {0[3]:05.2f}[{1[3]:03}](M)'
        '\nSAR: {0[4]:05.2f}[{1[4]:03}](V)  {0[5]:05.2f}[{1[5]:03}](M)'
    ).format(np.max(results, axis=0),
             np.argmax(results, axis=0) + 1)
    minimum = (
        '\nMin:\nSDR: {0[0]:05.2f}[{1[0]:03}](V)  {0[1]:05.2f}[{1[1]:03}](M) \nSIR: {0[2]:05.2f}[{1[2]:03}](V)  {0[3]:05.2f}[{1[3]:03}](M)'
        '\nSAR: {0[4]:05.2f}[{1[4]:03}](V)  {0[5]:05.2f}[{1[5]:03}](M)'
    ).format(np.min(results, axis=0),
             np.argmin(results, axis=0) + 1)

    print(mean)
    print(median)
    print(maximum)
    print(minimum)

    file = open('{}/metrics{}.txt'.format(savedir, metrics), 'w+')

    for i in range(count):
        print(
            '{0:20}: SDR [{1[0]:05.2f}  {1[1]:05.2f}]  SIR [{1[2]:05.2f}  {1[3]:05.2f}]  SAR [{1[4]:05.2f}  {1[5]:05.2f}]'
            .format(songs[i], results[i]),
            file=file)

    print(mean, file=file)
    print(median, file=file)
    print(maximum, file=file)
    print(minimum, file=file)

    file.close()
Example #23
"""
Calculate the PESQ score
"""

import common
from pypesq import pesq as _pesq

pesq = common.evaluate(_pesq)