import logging

from sklearn.cluster import MiniBatchKMeans

from utils import Stopwatch


def get_clustering(data_to_cluster, cluster_model_type, cluster_model_params):
    """Fit the requested clustering model on the data and time the fit."""
    sw = Stopwatch()
    sw.start()

    if cluster_model_type == 'KMEANS':
        cluster_model = MiniBatchKMeans(**cluster_model_params)
    elif cluster_model_type == 'DBSCAN':
        cluster_model = DBSCAN_w_prediction(**cluster_model_params)
    else:
        raise ValueError('Unknown cluster model type: %r' % cluster_model_type)

    cluster_model.fit(data_to_cluster)
    cluster_model.X = data_to_cluster  # keep the fitted data on the model

    sw.stop()
    logging.debug('Descriptors clustered into %d clusters.',
                  cluster_model.n_clusters)
    return cluster_model
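
# DBSCAN_w_prediction above is a project-specific wrapper (sklearn's DBSCAN has
# neither a predict() method nor an n_clusters attribute). A minimal sketch of
# such a wrapper (hypothetical), which labels new points after the nearest core
# sample:
import numpy as np
from sklearn.cluster import DBSCAN
from sklearn.neighbors import NearestNeighbors


class DBSCAN_w_prediction(DBSCAN):
    def fit(self, X, y=None, sample_weight=None):
        super(DBSCAN_w_prediction, self).fit(X, sample_weight=sample_weight)
        # Number of clusters found, not counting the noise label (-1).
        labels = set(self.labels_)
        self.n_clusters = len(labels) - (1 if -1 in labels else 0)
        # Index the core samples for nearest-neighbour prediction.
        self._nn = NearestNeighbors(n_neighbors=1).fit(self.components_)
        self._core_labels = self.labels_[self.core_sample_indices_]
        return self

    def predict(self, X):
        # Assign each point the label of its nearest core sample.
        _, idx = self._nn.kneighbors(np.asarray(X))
        return self._core_labels[idx.ravel()]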
#print "Sudoku dpll",dpll(X2SATsudoku(sud)) # Opomba: tega ne premelje skozi v normalnem casu.

print "::: END DPLL :::\n\n"
"""
######################################################
#################UTILS################################
######################################################
"""
# The Stopwatch class gives us a simple way to measure and compare execution times across different methods.
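# A minimal sketch of what such a Stopwatch might look like (hypothetical,
# for reference only; the demo below imports the real class from utils):
import time


class Stopwatch(object):
    def __init__(self, tag=None):
        self.tag = tag
        self.restart()                      # starts automatically on creation

    def start(self):
        self._start = time.time()

    def restart(self):
        self._laps = []                     # clears all previous values
        self._end = None
        self.start()

    def intermediate(self, tag):
        self._laps.append((tag, time.time()))

    def stop(self, tag=None):
        self._end = time.time()
        if tag is not None:
            self._laps.append((tag, self._end))
        return self._end - self._start      # elapsed seconds

    def __str__(self):
        lines = ['Stopwatch %s' % (self.tag or '')]
        prev = self._start
        for tag, t in self._laps:
            # Each intermediate time is measured from the previous one.
            lines.append('  %s: %.3f s' % (tag, t - prev))
            prev = t
        end = self._end if self._end is not None else time.time()
        lines.append('  TOTAL: %.3f s' % (end - self._start))
        return '\n'.join(lines)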

from utils import Stopwatch
print "::: BEGIN UTILS :::"
stoparica = Stopwatch("Primer uporabe")     # Kot parameter lahko podamo ime (tag).
                                        # Stoparica se avtomatsko zazene, ko jo ustvarimo.
stoparica.stop()                        # Ustavimo jo s stop.
stoparica.restart()        # Ko jo restartamo, pocisti vse prejsnje vrednosti.
hadamardova_matrika(8)
stoparica.intermediate("Vmesni cas 10")     # Lahko dodamo vec vmesnih casov.
hadamardova_matrika(10)
stoparica.intermediate("Vmesni cas 12")
stoparica.stop("Skupaj")
print stoparica                         # Rezultat izpisemo tako, da stoparico enostavno izpisemo z print.
                                        # Pri izpisu se vsak vmesni cas meri od prejsnjega vmesnega casa,
                                        # TOTAL pa je razlika od (konec-start).

# Usage example: compare the old and the new implementation.
st = Stopwatch("Optimization")
hadamardova_matrikaOLD(8)
st.intermediate("Old 10")
hadamardova_matrika(8)
st.stop()
print(st)
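
# hadamardova_matrika / hadamardova_matrikaOLD are not shown in this snippet.
# A plausible sketch using Sylvester's construction (hypothetical, assuming the
# argument k is the number of doubling steps, so the result is 2^k x 2^k):
import numpy as np


def hadamardova_matrika(k):
    H = np.array([[1]])
    for _ in range(k):
        # Sylvester step: H_{2n} = [[H, H], [H, -H]]
        H = np.block([[H, H], [H, -H]])
    return H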
# Example #3
    # Evaluate on the validation set
    # avg_loss_valid = evaluate(net, criterion, valid_loader, device)
    # y_loss_valid.append(avg_loss_valid)
    # logger.log("Average validation loss: {}".format(avg_loss_valid))

    #x_error_median, q_error_median, med_valid_loss = evaluate_median(net, criterion, itertools.islice(valid_loader, 10), device)
    x_error_median, q_error_median, med_valid_loss = evaluate_median(
        net, criterion, valid_loader, device)
    # y_loss_valid.append(loss_median)
    # y_training.append(np.median(training_loss))
    net.log("Median validation error: {:.2f} m, {:.2f} °".format(
        x_error_median, q_error_median))
    net.log("Median validation loss: {}".format(med_valid_loss))

    # Print some stats
    elapsed_time = stopwatch_epoch.stop()
    net.log("Epoch time: {:0.2f} minutes".format(elapsed_time))
    # print_torch_cuda_mem_usage()
    total_epoch_time += elapsed_time
    avg_epoch_time = total_epoch_time / (epoch + 1)
    training_time_left = (EPOCHS - epoch - 1) * avg_epoch_time
    net.log("Training time left: ~ {:.2f} minutes ({:.2f} hours)".format(
        training_time_left, training_time_left / 60))

    # # Plot the average loss over the epochs
    # loss_fig = plt.figure()
    # loss_ax = loss_fig.gca()
    # loss_ax.plot(x, y_loss_valid, "r", label = "Validation")
    # loss_ax.plot(x, y_training, "b", label = "Training")
    # plt.xlabel("Epoch")
    # plt.ylabel("Median loss")
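
# evaluate_median() used above is not defined in this snippet. A minimal sketch
# of such a routine (hypothetical), assuming a pose-regression setup where the
# loader yields (image, pose) batches with 7-D poses [x, y, z, qw, qx, qy, qz];
# position error is reported in metres, orientation error in degrees:
import numpy as np
import torch


def evaluate_median(net, criterion, loader, device):
    net.eval()
    x_errors, q_errors, losses = [], [], []
    with torch.no_grad():
        for images, poses in loader:
            images, poses = images.to(device), poses.to(device)
            pred = net(images)
            losses.append(criterion(pred, poses).item())
            # Position error: Euclidean distance in metres.
            x_err = torch.norm(pred[:, :3] - poses[:, :3], dim=1)
            # Orientation error: angle between unit quaternions, in degrees.
            q_pred = pred[:, 3:] / torch.norm(pred[:, 3:], dim=1, keepdim=True)
            q_true = poses[:, 3:] / torch.norm(poses[:, 3:], dim=1, keepdim=True)
            dot = torch.abs((q_pred * q_true).sum(dim=1)).clamp(max=1.0)
            x_errors.extend(x_err.tolist())
            q_errors.extend((2 * torch.acos(dot) * 180.0 / np.pi).tolist())
    return (float(np.median(x_errors)), float(np.median(q_errors)),
            float(np.median(losses)))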