Example No. 1
def train():
    plot_scores = []
    plot_mean_scores = []
    total_score = 0
    record = 0
    agent = Agent()
    game = SnakeGameAI()
    while True:
        state_old = agent.get_state(game)

        final_move = agent.get_action(state_old)

        reward, done, score = game.play_step(final_move)
        state_new = agent.get_state(game)

        agent.train_short_memory(state_old, final_move, reward, state_new, done)

        agent.remember(state_old, final_move, reward, state_new, done)

        if done:
            game.reset()
            agent.n_games += 1
            agent.train_long_memory()
            if score > record:
                record = score
                agent.model.save()
            print('Game', agent.n_games, 'Score', score, 'Record:', record)
            plot_scores.append(score)
            total_score += score
            mean_score = total_score / agent.n_games
            plot_mean_scores.append(mean_score)
            plot(plot_scores, plot_mean_scores)
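
All of the snake examples on this page finish each game by calling plot(plot_scores, plot_mean_scores), but the helper itself is never shown. As a minimal sketch of what such a live score chart typically looks like (assuming matplotlib with an interactive backend; the real helper in each project may differ):

import matplotlib.pyplot as plt

plt.ion()  # interactive mode, so the figure refreshes between games

def plot(scores, mean_scores):
    # Redraw the per-game score and running-mean curves after every game.
    plt.clf()
    plt.title('Training...')
    plt.xlabel('Number of Games')
    plt.ylabel('Score')
    plt.plot(scores, label='score')
    plt.plot(mean_scores, label='mean score')
    plt.ylim(bottom=0)
    plt.legend(loc='upper left')
    plt.pause(0.1)  # give the GUI event loop time to draw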
Example No. 2
def main(image_path, saved_model_path, top_k=1, json_path=None):
    '''
    Input: image_path, saved_model_path, top_k, json_path
    Output: a plot showing the provided photo with the predicted class as its
            title, alongside a histogram of the top_k most likely classes and
            their probabilities.
    '''
    model = load_model(saved_model_path)
    probs, class_probs, image = predict(image_path, model, top_k)
    plot(image, probs, class_probs, json_path, top_k)
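
A hedged usage sketch (the file names below are hypothetical, not taken from the source project):

main('test_images/orchid.jpg',    # hypothetical input photo
     'flower_classifier.h5',      # hypothetical saved Keras model
     top_k=5,
     json_path='label_map.json')  # hypothetical class-index-to-name mapping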
Example No. 3
def train():
    # Function that trains the model.
    score = 0
    plot_scores = []
    plot_mean_scores = []
    total_score = 0
    # Set up the tracking variables.
    with open('record.txt', 'r') as f:
        record = f.read()
    record = int(record)
    agent = Agent()
    game = SnakeGameAI()
    # Load the saved record and create the environment and the agent.
    while True:
        state_old = agent.get_state(game)
        # Old state.
        final_move = agent.get_action(state_old)
        # The model's decision.
        reward, done, score = game.play_step(final_move)
        state_new = agent.get_state(game)
        # Returns the results and builds the new state.

        agent.train_short_memory(state_old, final_move, reward, state_new, done)
        # Regular (short-term) training.

        agent.remember(state_old, final_move, reward, state_new, done)
        # Store the experience.

        if done:

            game.reset()
            agent.n_games += 1
            with open('games.txt', 'w') as f:
                f.write(str(agent.n_games))
            agent.train_long_memory()
            # Long-term (replay) training.

            if score > record:
                record = score
                agent.model.save()
                with open('record.txt', 'w') as f:
                    f.write(str(record))
                with open('games.txt', 'w') as f:
                    f.write(str(agent.n_games))
            # Update the saved record.

            print('Game', agent.n_games, 'Score', score, 'Record:', record)

            plot_scores.append(score)
            total_score += score
            mean_score = total_score / agent.n_games
            plot_mean_scores.append(mean_score)
            plot(plot_scores, plot_mean_scores)

            print('Average score: ', mean_score)
Example No. 4
def insertsomestuff():
    possible_plots = [2 * i for i in range(3)]
    possible_plots.append(1)

    pos = random.randint(1, len(board)) - 1

    ite = random.randint(1, 4)

    for j in range(ite):
        varone = random.randint(1, 4) - 1
        vartwo = random.randint(1, 4) - 1
        plot(varone, vartwo, board, possible_plots[pos])
def problem2():
    f = open('in_out/2.in', 'r')
    g = open('in_out/my_output/2.out', 'w')

    pointList = hlp.readPoints(f)
    convex_hull = hlp.convexHull(pointList)

    hlp.plot(pointList)
    hlp.plot(convex_hull, False)
    plt.title('Problem 2 - Convex Hull')
    plt.show()

    for pt in convex_hull:
        g.write(str(pt[0]) + ' ' + str(pt[1]) + '\n')
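
problem2 delegates the hull computation to hlp.convexHull, which is not shown on this page. One standard way to implement it is Andrew's monotone chain; the sketch below is an assumption about the helper, not its actual code:

def convex_hull(points):
    # Andrew's monotone chain: O(n log n) convex hull of 2D points,
    # returned in counter-clockwise order starting from the leftmost point.
    pts = sorted(set(map(tuple, points)))
    if len(pts) <= 2:
        return pts

    def cross(o, a, b):
        # z-component of (a - o) x (b - o); positive means a left turn
        return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])

    lower = []
    for p in pts:
        while len(lower) >= 2 and cross(lower[-2], lower[-1], p) <= 0:
            lower.pop()
        lower.append(p)

    upper = []
    for p in reversed(pts):
        while len(upper) >= 2 and cross(upper[-2], upper[-1], p) <= 0:
            upper.pop()
        upper.append(p)

    # Drop each chain's last point; it duplicates the other chain's first.
    return lower[:-1] + upper[:-1]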
Example No. 6
def train():
    plot_scores = []
    plot_mean_scores = []
    total_score = 0
    record = 0
    agent = Agent()
    game = SnakeGameAI()
    while True:
        # get old state
        state_old = agent.get_state(game)

        # get move
        final_move = agent.get_action(state_old)

        # perform move and get new state
        reward, done, score = game.play_step(final_move)
        state_new = agent.get_state(game)

        # train short memory
        agent.train_short_memory(state=state_old,
                                 action=final_move,
                                 reward=reward,
                                 next_state=state_new,
                                 done=done)

        # remember
        agent.remember(state=state_old,
                       action=final_move,
                       reward=reward,
                       next_state=state_new,
                       done=done)

        if done:
            # train long memory, plot results
            game.reset()
            agent.n_games += 1
            agent.train_long_memory()

            if score > record:
                record = score
                agent.model.save()
            print('Game: ', agent.n_games, 'Score: ', score, 'Record: ',
                  record)

            plot_scores.append(score)
            total_score += score
            mean_score = total_score / agent.n_games
            plot_mean_scores.append(mean_score)
            plot(plot_scores, plot_mean_scores)
Example No. 7
def train():
    plot_scores = []
    plot_avg_scores = []
    total_score = 0
    max_score = 0
    agent = Agent()
    game = SnakeGameAI()
    while True:
        # get current state of the game
        state_old = agent.get_state(game)

        # get the move based on the current state
        final_move = agent.get_action(state_old)

        # actually perform the move in the game
        reward, done, score = game.play_step(final_move)
        state_new = agent.get_state(game)

        # training the short term memory of the agent
        agent.train_short_memory(state_old, final_move, reward, state_new,
                                 done)

        # remember this iteration
        agent.remember(state_old, final_move, reward, state_new, done)

        if done:
            # reset the environment for another game
            game.reset()

            # increase game counter
            agent.num_games += 1

            # train the long term memory after a game
            agent.train_long_memory()

            # update the highscore if necessary
            if score > max_score:
                max_score = score
                agent.model.save()

            print('Game', agent.num_games, 'Score', score, 'Highscore',
                  max_score)

            plot_scores.append(score)
            total_score += score
            avg_score = total_score / agent.num_games
            plot_avg_scores.append(avg_score)
            plot(plot_scores, plot_avg_scores)
def problem4():
    f = open('in_out/4.in', 'r')
    g = open('in_out/my_output/4.out', 'w')

    pointList = hlp.readPoints(f)

    tspPath = TSP(pointList)

    hlp.plot(pointList)
    hlp.plot(tspPath, False)
    plt.title('Problem 4 - TSP')
    plt.show()

    for point in tspPath:
        g.write(str(point[0]) + ' ' + str(point[1]) + '\n')
    g.write(str(tspPath[0][0]) + ' ' + str(tspPath[0][1]))
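
The TSP helper used by problem4 is not shown either. A common quick approximation is the nearest-neighbour heuristic; the sketch below is an assumption, and the project's TSP may well use something better:

def TSP(points):
    # Nearest-neighbour tour: repeatedly hop to the closest unvisited point.
    remaining = [tuple(p) for p in points]
    path = [remaining.pop(0)]
    while remaining:
        last = path[-1]
        nearest = min(remaining,
                      key=lambda q: (q[0] - last[0]) ** 2 + (q[1] - last[1]) ** 2)
        remaining.remove(nearest)
        path.append(nearest)
    return path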
Example No. 9
def main():
    # Prints hyperparameters
    print("================ Hyperparameters ====================")
    statprinter()

    # Get the transforms
    transform = dataTransforms()

    # input data
    trainData = trainProcess(transform)
    testData = testProcess(transform)
    validData, input_dimension = validProcess(transform)

    # Load Data
    print("================LOADING DATA====================")
    trainLoader, testLoader, validLoader = getLoaders(trainData, testData,
                                                      validData)

    printDataStats(trainData, testData, validData)

    model = RNN(input_size, hidden_size, num_layers, num_classes).to(device)

    criterion, optimizer = setCriteria(model)

    print("================TRAINING MODEL====================")
    saved_losses, validation_losses = trainModel(trainLoader, testLoader,
                                                 model, criterion, optimizer)
    # Save the model checkpoint
    torch.save(model.state_dict(), 'lstm_model.ckpt')

    print("================TEST MODEL====================")
    correct, total, actual, pred = testModel(testLoader, model, len(testData))

    accuracy = 100 * correct / total
    plot(saved_losses, validation_losses, accuracy)
    confusionmMat = confusion_matrix(actual, pred)
    print("...................................")
    print("Confusion Matrix:", confusionmMat)
    print("...................................")

    plt.figure()
    plot_confusion_matrix(confusionmMat,
                          classes=classes_str1,
                          normalize=True,
                          title='Normalized confusion matrix')

    perClassAccuracy(correct, total, testLoader, model)
Example No. 10
def main():

    if len(sys.argv) < 4 or len(sys.argv) > 5:
        helper.msg(
            "Usage: timeanalysis TimeseriesFile Top ScalingFactor [plotname]\n"
        )
        exit(0)
    filename = sys.argv[1]
    top = int(sys.argv[2])
    scalingfactor = int(sys.argv[3])

    helper.msg("Reading the file...\n")
    filedata = json.load(open(filename))
    helper.msg("[DONE]\n")
    helper.msg("Parsing the file...\n")

    startdate_str, freq, timeseries = filedata
    startdate = helper.strtodate(startdate_str)
    helper.msg("[DONE]\n")

    perminute, sorted_perminute = {}, {}
    for category in timeseries:
        helper.msg("Changing the time scale...\n")

        perminute[category] = helper.scaletime(timeseries[category],
                                               scalingfactor)
        helper.msg("[DONE]\n")
        tickcount = len(perminute[category])

        sorted_perminute[category] = [None] * tickcount  # placeholders, filled in below

        helper.msg("Calculating the top topics...\n")
        for tick in range(tickcount):
            sorted_perminute[category][tick] = dict(
                sorted(perminute[category][tick].items(),
                       key=lambda l: l[1],
                       reverse=True)[:top])

    # print(helper.findtops(perminute[category], top))
    print(json.dumps((startdate_str, scalingfactor * freq, sorted_perminute)))
    if len(sys.argv) == 5:
        plotname = sys.argv[4]
        for category in sorted_perminute:
            #print sorted_perminute
            helper.plot(sorted_perminute[category], startdate,
                        scalingfactor * freq, plotname + " " + category)
Example No. 12
def train():
    plot_scores = []
    plot_mean_score = []
    total_score = 0
    record = 0
    agent = Agent()
    game = SnakeGamesAI()

    while True:
        # get old state
        state_old = agent.get_state(game)

        #move
        final_move = agent.get_action(state_old)

        #perform
        reward, done, score = game.play_step(final_move)
        state_new = agent.get_state(game)

        #train short
        agent.train_short_memory(state_old, final_move, reward, state_new,
                                 done)

        #remember

        agent.remember(state_old, final_move, reward, state_new, done)

        if done:
            #train the long
            game.reset()
            agent.n_games += 1
            agent.train_long_memory()

            if score > record:
                record = score
                agent.model.save()

            print("Game:", agent.n_games, "score:", score, 'Record:', record)

            plot_scores.append(score)
            total_score += score
            mean_score = total_score / agent.n_games
            plot_mean_score.append(mean_score)
            plot(plot_scores, plot_mean_score)
Example No. 13
def train():
    plot_scores = []
    plot_mean_scores = []
    total_score = 0
    record = 0
    agent = Agent()
    game = SnakeGameAI()
    while True:  # the good old while true
        # get old state
        state_old = agent.get_state(game)

        # get move prediction
        final_move = agent.get_action(state_old)

        # perform move and get new state
        reward, done, score = game.play_step(final_move)
        state_new = agent.get_state(game)

        # train short memory with the information we just got by playing A in state S and getting reward R and ending
        # up in S' (new state)
        agent.train_short_memory(state_old, final_move, reward, state_new,
                                 done)

        # remember for after epoch learning
        agent.remember(state_old, final_move, reward, state_new, done)

        # after every epoch
        if done:
            # train long memory, plot result
            game.reset()
            agent.n_games += 1
            agent.train_long_memory()

            if score > record:
                record = score
                agent.model.save()

            print('Game', agent.n_games, 'Score', score, 'Record:', record)

            plot_scores.append(score)
            total_score += score
            mean_score = total_score / agent.n_games
            plot_mean_scores.append(mean_score)
            plot(plot_scores, plot_mean_scores)
Example No. 14
def train():
    plot_scores = []
    plot_mean_scores = []
    total_score = 0
    record_score = 0

    agent = Agent()
    env = SnakeGameEnv()

    # training loop
    while True:
        # get old state
        old_state = agent.get_state(env)

        # get action
        move = agent.get_action(old_state)

        # perform the move and get new state
        reward, done, score = env.play_step(move)
        new_state = agent.get_state(env)

        # train short memory
        agent.train_short_memory(old_state, move, reward, new_state, done)

        # store data
        agent.store_data(old_state, move, reward, new_state, done)

        if done:
            # train long memory (all the previous episodes) and plot the results
            env.reset()
            agent.num_games += 1
            agent.train_long_memory()

            if score > record_score:
                record_score = score
                agent.model.save()

            print("Game", agent.num_games, "Score", score, "Record",
                  record_score)
            plot_scores.append(score)
            total_score += score
            mean_score = total_score / agent.num_games
            plot_mean_scores.append(mean_score)
            plot(plot_scores, plot_mean_scores)
Example No. 15
def train():
    plot_scores = []
    plot_mean_scores = []
    total_score = 0
    record = 0
    agent = Agent()
    game = SpaceInvadersAI(Settings, gf, GameStats, Ship, Scoreboard, Button,
                           Bullet, Alien)
    while True:
        # get old state
        state_old = agent.get_state(game)

        # get move
        final_move = agent.get_action(state_old)

        # perform move and get new state
        reward, done, score = game.check_events(final_move)
        # print(score)
        state_new = agent.get_state(game)

        # train short memory
        agent.train_short_memory(state_old, final_move, reward, state_new,
                                 done)

        # remember
        agent.remember(state_old, final_move, reward, state_new, done)

        if done:
            # train long memory, plot result
            game.reset()
            agent.n_games += 1
            agent.train_long_memory()

            if score > record:
                record = score
                agent.model.save()

            print('Game', agent.n_games, 'Score', score, 'Record:', record)

            plot_scores.append(score)
            total_score += score
            mean_score = total_score / agent.n_games
            plot_mean_scores.append(mean_score)
            plot(plot_scores, plot_mean_scores)
Example No. 16
def train():
  plot_scores = []
  plot_mean_scores = []
  total_score = 0
  highscore = 0
  agent = Agent()
  game = SnakeGameAI()

  while True:
    # Get old state
    state_old = agent.get_state(game)

    # Get move
    final_move = agent.get_action(state_old)

    # Perform move and get new state
    reward, done, score = game.play_step(final_move)
    state_new = agent.get_state(game)

    # Train short memory
    agent.train_short_memory(state_old, final_move, reward, state_new, done)

    # Remember
    agent.remember(state_old, final_move, reward, state_new, done)

    if done:
      # Train long memory
      game.reset()
      agent.n_games += 1
      agent.train_long_memory()

      if score > highscore:
        highscore = score
        agent.model.save()

      print('Game', agent.n_games, 'Score', score, 'Highscore:', highscore)

      plot_scores.append(score)
      total_score += score
      mean_score = total_score / agent.n_games
      plot_mean_scores.append(mean_score)
      plot(plot_scores, plot_mean_scores)
Example No. 17
def train():
    # data to plot
    plotScores = []
    plotMeanScores = []
    totalScore = 0
    record = 0
    agent = Agent()
    game = SnakeGameAI()

    while True:
        stateOld = agent.getState(game)

        move = agent.getAction(stateOld)

        reward, game_over, score = game.play_step(move)

        stateNew = agent.getState(game)

        agent.trainShortMemory(stateOld, move, reward, stateNew, game_over)

        agent.remember(stateOld, move, reward, stateNew, game_over)

        if game_over:
            # train long memory, plot results, reset the game
            game.reset()

            agent.numberOfGames += 1
            agent.trainLongMemory()

            if score > record:
                record = score
                agent.model.save()

            print("Game", agent.numberOfGames, 'Score', score, 'Record',
                  record)

            plotScores.append(score)
            totalScore += score
            mean_score = totalScore / agent.numberOfGames
            plotMeanScores.append(mean_score)
            plot(plotScores, plotMeanScores)
Example No. 18
def train():
    plot_scores = []
    plot_mean_scores = []
    total_scores = 0
    record = 0
    agent = Agent()
    agent.model.load("model_test2.pth")
    game = SnakeGameAI()
    while True:
        # get the current state
        old_state = agent.get_state(game)

        final_move = agent.get_action(old_state)
        # perform move and get new state
        reward, game_over, score = game.play_step(final_move)

        new_state = agent.get_state(game)

        agent.train_short_memory(old_state, final_move, reward, new_state,
                                 game_over)

        agent.remember(old_state, final_move, reward, new_state, game_over)

        if game_over:
            # train long memory, plot result
            game.reset()
            agent.number_of_games += 1
            agent.train_long_memory()

            if score > record:
                record = score
                agent.model.save("model_test2.pth")
            print(
                f"Game: {agent.number_of_games}, Score: {score}, Record: {record}"
            )

            plot_scores.append(score)
            total_scores += score
            mean_score = total_scores / agent.number_of_games
            plot_mean_scores.append(mean_score)
            plot(plot_scores, plot_mean_scores)
Example No. 19
def train():
    plot_scores = []
    plot_mean_scores = []
    total_score = 0
    best_score = 0
    agent = Agent(use_checkpoint=True)
    game = SnakeGameAI()

    while True:
        # get current state
        state_curr = agent.get_state(game)

        # get action
        action = agent.get_action(state_curr)

        # perform action & get new state
        reward, game_over, score = game.play_step(action)
        state_new = agent.get_state(game)

        agent.train_short_memory(state_curr, action, reward, state_new,
                                 game_over)

        agent.remember(state_curr, action, reward, state_new, game_over)

        if game_over:
            game.reset()
            agent.no_of_games += 1
            agent.train_long_memory()

            if score > best_score:
                best_score = score
                agent.model.save()

            print("Game", agent.no_of_games, "Score", score, "Best score",
                  best_score)

            plot_scores.append(score)
            total_score += score
            mean_score = total_score / agent.no_of_games
            plot_mean_scores.append(mean_score)
            plot(plot_scores, plot_mean_scores)
Example No. 20
def problem3():
    f = open('in_out/3.in', 'r')
    g = open('in_out/my_output/3.out', 'w')

    polygonPoints = hlp.readPoints(f)
    pointList = hlp.readPoints(f)

    hlp.plot(polygonPoints, False)
    hlp.plot(pointList)
    plt.title('Problem 3 - Point Position Relative to a Polygon')
    plt.show()

    for p in pointList:
        pos = position(p, polygonPoints)

        if pos < 0:
            g.write('outside\n')
        elif pos > 0:
            g.write('inside\n')
        else:
            g.write('on edge\n')
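
The position helper problem3 relies on returns a negative, positive, or zero value for outside, inside, and on-edge respectively. Its implementation is not shown; a minimal sketch with the same sign convention, assuming a convex polygon whose vertices are listed counter-clockwise:

def position(p, polygon):
    # > 0: p strictly inside; < 0: outside; 0: on the boundary.
    result = 1
    n = len(polygon)
    for i in range(n):
        a, b = polygon[i], polygon[(i + 1) % n]
        # z-component of the cross product (b - a) x (p - a)
        cross = (b[0] - a[0]) * (p[1] - a[1]) - (b[1] - a[1]) * (p[0] - a[0])
        if cross < 0:
            return -1  # p lies to the right of some edge: outside
        if cross == 0:
            result = 0  # collinear with an edge: on the boundary
    return result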
Example No. 21
    def train(self,
              path_params='best_params.json',
              path_model='model.h5',
              plot_chart=False,
              handmade_params=None):
        if os.path.exists(path_params) and not handmade_params:
            layers, average_pooling, batch, epochs, learning_rate = load_params(
                path_params)
        else:
            layers, average_pooling, batch, epochs, learning_rate = handmade_params
        layers = layer_combination[layers]
        print(
            fg('green') +
            'Parameters : layers : {0}, average_pooling : {1}, batch : {2}, '
            'epochs : {3}, learning rate : {4}'.format(
                layers, average_pooling, batch, epochs, learning_rate))
        sudoku_model = SudokuBreaker(layers=layers,
                                     average_pooling=average_pooling)
        id_mlflow = random.randint(1, 2542314)
        if self.custom:
            sudoku_model.fit_custom(self.train_x,
                                    self.train_y,
                                    self.val_x,
                                    self.val_y,
                                    batch=batch,
                                    epochs=epochs,
                                    learning_rate=learning_rate,
                                    id_mlflow=id_mlflow)
        else:
            sudoku_model.fit_inbuilt(self.train_x,
                                     self.train_y,
                                     self.val_x,
                                     self.val_y,
                                     batch=batch,
                                     epochs=epochs,
                                     learning_rate=learning_rate)
        sudoku_model.save(path_model)
        if plot_chart:
            plot(sudoku_model.hist)
Example No. 22
    def train(self):
        batches = glob(cfg.Paths.batches + "/batch[0-9]*.npz")
        log.info("Found {} batches.".format(len(batches)))
        tr_batches, vl_batches = train_test_split(batches,
                                                  shuffle=True,
                                                  train_size=0.8)

        # storing losses over time
        tr_log = {}
        for i in range(1, midi_cfg.n_tracks + 1):
            tr_log[f"TR_Loss_Track_{i}"] = []
        tr_log["TR_Loss_Total"] = []

        vl_log = {}
        for i in range(1, midi_cfg.n_tracks + 1):
            vl_log[f"VL_Accuracy_Track_{i}"] = []
        vl_log["VL_Accuracy_Total"] = []

        best_total_vl_accuracy = -1
        for epoch in range(1, training_cfg.n_epochs + 1):
            log.info("Epoch {} of {}".format(epoch, training_cfg.n_epochs))
            if (epoch % int(training_cfg.n_epochs / 10)) == 0:
                old = k.get_value(self.regularisation_weight)
                new = old + 0.1
                k.set_value(self.regularisation_weight, new)
                log.info(" - Regularisation weight annealed to {}".format(
                    k.get_value(self.regularisation_weight)))
                log.info(" - Plotting metrics graphs")
                h.plot(tr_log)
                h.plot(vl_log)

            # train on the training set
            log.info(" - Training on training set...")
            training_losses = self._run(tr_batches, False)
            for training_loss in training_losses:
                sum_loss = 0
                for i in range(1, midi_cfg.n_tracks + 1):
                    loss = training_loss[i]
                    sum_loss += loss
                    tr_log[f"TR_Loss_Track_{i}"].append(loss)
                tr_log["TR_Loss_Total"].append(sum_loss / midi_cfg.n_tracks)

                # for i, key in enumerate(["Z_Score_Real", "Z_Score_Fake", "Z_Score_Penalty", "Gen_Score"]):
                #     tr_log[key].append(training_loss[6 + 2 * midi_cfg.n_tracks + (i * 2)])

            # at the end of each epoch, we evaluate on the validation set
            log.info(" - Evaluating on validation set...")
            validation_losses = self._run(vl_batches, True)
            for validation_loss in validation_losses:
                sum_mean_loss = 0
                vl_log_tmp = {}
                for i in range(1, midi_cfg.n_tracks + 1):
                    vl_log_tmp[f"VL_Accuracy_Track_{i}"] = []
                for i in range(0, midi_cfg.n_tracks):
                    # Collect all accuracy values
                    loss = validation_loss[5 + midi_cfg.n_tracks + (i * 2)]
                    vl_log_tmp[f"VL_Accuracy_Track_{i + 1}"].append(loss)
                for i in range(1, midi_cfg.n_tracks + 1):
                    # Calculate the mean accuracy for each track
                    mean_loss = mean(vl_log_tmp[f"VL_Accuracy_Track_{i}"])
                    sum_mean_loss += mean_loss
                    vl_log[f"VL_Accuracy_Track_{i}"].append(mean_loss)
                # Calculate the overall accuracy
                vl_log["VL_Accuracy_Total"].append(sum_mean_loss /
                                                   midi_cfg.n_tracks)

                if vl_log["VL_Accuracy_Total"][-1] > best_total_vl_accuracy:
                    best_total_vl_accuracy = vl_log["VL_Accuracy_Total"][-1]
                    self.save_checkpoint()
Example No. 23
def main(settings,
         dataset_number=5,
         image_size=200,
         padding=2,
         n_clusters=None,
         out_file='../img.png'):
    # Load images and get embeddings from NN
    imgs = helper.get_images(dataset_number)
    embeddings = helper.get_embeddings(dataset_number, imgs)
    print('loaded {} images'.format(len(imgs)))

    if settings.shuffle:
        random.shuffle(imgs)

    # Compute 2D embeddings with MDS
    if settings.no_mds:
        em_2d = np.random.random((len(imgs), 2))
    else:
        em_2d = compute.mds(embeddings, init=compute.pca(embeddings))

    # Perform clustering
    cluster_centers, labels = compute.k_means(em_2d, k_default=n_clusters)
    print('clusters:', len(cluster_centers))
    print('sizes of clusters: ', end='')
    for l in range(max(labels) + 1):
        print(sum(labels == l), end=', ')
    print()

    # Representative images
    silhouettes = compute.get_silhouettes(em_2d, labels)
    representative = compute.get_representative(em_2d, cluster_centers, labels,
                                                silhouettes)

    # Sizes and positions of the images
    ratios = helper.get_image_size_ratios(imgs)
    sizes = compute.get_sizes(image_size, em_2d, ratios, cluster_centers,
                              labels, representative)
    positions = compute.get_positions(em_2d, image_size)

    # Expand as long as overlaps occur - gradually increase space between images
    iters = 0
    while compute.overlap(positions, sizes, padding):
        positions *= 1.05
        iters += 1
    print('overlap resolved in {} iterations'.format(iters))

    dists = [compute.get_distances(positions)]

    # Overlapping resolved, now "shrink" towards representative images
    if not settings.no_intra:
        positions = compute.shrink_intra(positions, sizes, representative,
                                         labels, padding)
        dists.append(compute.get_distances(positions))

    if not settings.no_inter:
        # Move clusters closer together by same factor
        positions = compute.shrink_inter1(positions, sizes, representative,
                                          labels, padding)
        dists.append(compute.get_distances(positions))

        # Move clusters closer together separately by different factors
        positions = compute.shrink_inter2(positions, sizes, representative,
                                          labels, padding)
        dists.append(compute.get_distances(positions))

    if not settings.no_xy and not settings.no_intra:
        # Shrink by x and y separately
        positions = compute.shrink_xy(positions, sizes, representative, labels,
                                      padding)
        dists.append(compute.get_distances(positions))

    if not settings.no_shake:
        # "Shake" images with small offsets
        for _ in range(10):
            positions = compute.shrink_with_shaking(positions, sizes, padding)
        dists.append(compute.get_distances(positions))

    if not settings.no_final and not settings.no_intra:
        # Shrink to finalize positions
        positions = compute.shrink_xy(positions, sizes, representative, labels,
                                      padding)
        dists.append(compute.get_distances(positions))
        positions = compute.shrink_xy(positions,
                                      sizes,
                                      representative,
                                      labels,
                                      padding,
                                      smaller=True)
        dists.append(compute.get_distances(positions))

        if not settings.no_inter:
            positions = compute.shrink_inter2(positions, sizes, representative,
                                              labels, padding)
            dists.append(compute.get_distances(positions))

    im = helper.plot(imgs, positions, sizes)
    im.save(out_file)
    # helper.plot_clusters(em_2d, cluster_centers, labels, representative)

    scores = list(map(lambda d: compute.compare_distances(dists[0], d), dists))

    print('\nscores:')
    for s in scores[1:]:
        print('{:.3f},'.format(s), end=' ')