Code Example #1
from som import SOM
from plot import plot_neuron_chains, plot_loss, plot_routes
from opts import OPT

if __name__ == '__main__':
    args = OPT().args()
    if args.mode == 'run':
        SOM(args)
    else:
        plot_loss(input_dir=args.out_dir)
        plot_routes(input_dir=args.out_dir)
        plot_neuron_chains(input_dir=args.out_dir)
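
For context, a minimal sketch of the OPT wrapper this entry point assumes — hypothetical, inferred only from the attribute accesses above; the real opts.py defines the actual options:

import argparse

class OPT:
    # Hypothetical stand-in for opts.OPT, inferred from usage.
    def args(self):
        p = argparse.ArgumentParser()
        p.add_argument('--mode', choices=['run', 'plot'], default='run')
        p.add_argument('--out_dir', default='output')
        return p.parse_args()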
Code Example #2
File: main.py Project: cobap/ZooCluster
def le_parametros():
    # Read the parameter sets from parametros.txt, one per line.
    with open('./parametros.txt', 'r') as inp:
        return inp.read().splitlines()


if __name__ == "__main__":
    pre_process = ProcessZoo()
    animal_matrix = pre_process.get_original_matrix()

    for parametros in le_parametros():
        ############################# SOM #############################
        print(parametros)
        _mapsize = int(parametros[0]) * 10
        som = SOM(animal_matrix=animal_matrix,
                  mapsize=[_mapsize, _mapsize],
                  parametros=parametros)
        # som.view2dpacked()
        # som.train_som()
        # som.view2dpacked()
        # som.visualization_umatrix()
        # som.interpolation()

        ############################# KMEANS #############################
        kmeans = Kmeans(points=animal_matrix,
                        k_centroids=4,
                        n_iteracoes=100,
                        error=0.1)
        kmeans.run_kmeans()

        ############################# POST-PROCESSING #############################
Code Example #3

import random as ran

import numpy as np
from matplotlib import pyplot as plt

# x_train / y_train: an MNIST-style split with one-hot labels,
# loaded earlier in the original script.

def display_digit(num):
    label = y_train[num].argmax(axis=0)
    image = x_train[num].reshape([28, 28])
    plt.title('Example: %d  Label: %d' % (num, label))
    plt.imshow(image, cmap=plt.get_cmap('gray_r'))
    plt.show()


display_digit(ran.randint(0, x_train.shape[0]))

# Import the SOM class and train a 30x30 SOM lattice
from som import SOM

som = SOM(30, 30, x_train.shape[1], 200)
som.train(x_train)

# Fit train data into SOM lattice
mapped = som.map_vects(x_train)
mappedarr = np.array(mapped)
x1 = mappedarr[:, 0]
y1 = mappedarr[:, 1]

index = [np.where(r == 1)[0][0] for r in y_train]
index = list(map(str, index))

## Plots: 1) Train 2) Test+Train ###

plt.figure(1, figsize=(12, 6))
plt.subplot(121)
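
The listing breaks off here. A plausible continuation of the training-set plot, assuming the usual annotate-each-BMU pattern (hypothetical, not recovered from the original):

plt.scatter(x1, y1)
for i, label in enumerate(index):
    plt.annotate(label, (x1[i], y1[i]))
plt.title('Train data mapped onto the SOM')
plt.show()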
Code Example #4
File: main.py Project: Samyak2/som
    #Map colours to their closest neurons
    mapped = som.map_vects(torch.Tensor(colors))

    #Plot
    plt.imshow(image_grid)
    plt.title('Color SOM')
    for i, map_ in enumerate(mapped):
        plt.text(map_[1],
                 map_[0],
                 color_names[i],
                 ha='center',
                 va='center',
                 bbox=dict(facecolor='white', alpha=0.5, lw=0))


#Train a 20x30 SOM with 100 iterations
n_iter = 100
som = SOM(m, n, 3, n_iter)
for iter_no in range(n_iter):
    #Train with each vector one by one
    som(data_t, iter_no)

    if iter_no % 5 == 0:
        draw_som(som)
        plt.pause(0.0001)
        plt.clf()
        print(f"Trained {iter_no} iterations")
plt.ioff()
draw_som(som)
plt.show()
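
Note: the training loop above assumes an interactive Matplotlib session; a plt.ion() call earlier in the original script (the counterpart of the plt.ioff() before the final draw) is what lets plt.pause() redraw the same figure while training runs.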
Code Example #5
     [1., 1., 1.],
     [.33, .33, .33],
     [.5, .5, .5],
     [.66, .66, .66]])

colors2 = np.array(
    [[0., 0., 0.],
     [0., 0., 1.],
     [1., 1., 0.],
     [1., 1., 1.],
     [1., 0., 0.]])

color_names = \
    ['black', 'blue', 'darkblue', 'skyblue',
     'greyblue', 'lilac', 'green', 'red',
     'cyan', 'violet', 'yellow', 'white',
     'darkgrey', 'mediumgrey', 'lightgrey']

s = SOM(colors2, [25, 25], alpha=0.3)

# Initial weights
plt.imshow(s.w_nodes)
plt.show()

# Learning to cluster the RGB colors
s.train(max_it=30)

# Trained weights
plt.imshow(s.w_nodes)
plt.show()
Code Example #6
#Training inputs for RGBcolors
colors = np.array([[0., 0., 0.], [0., 0., 1.], [0., 0., 0.5],
                   [0.125, 0.529, 1.0], [0.33, 0.4, 0.67], [0.6, 0.5, 1.0],
                   [0., 1., 0.], [1., 0., 0.], [0., 1., 1.], [1., 0., 1.],
                   [1., 1., 0.], [1., 1., 1.], [.33, .33, .33], [.5, .5, .5],
                   [.66, .66, .66]])

color_names = [
    'black', 'blue', 'darkblue', 'skyblue', 'greyblue', 'lilac', 'green',
    'red', 'cyan', 'violet', 'yellow', 'white', 'darkgrey', 'mediumgrey',
    'lightgrey'
]

#Train a 10x10 SOM with 400 iterations
som = SOM(10, 10, 3, 400)

som.train(colors)

#Get output grid
image_grid = som.get_centroids()

#Map colours to their closest neurons
mapped = som.map_vects(colors)

#Plot
plt.imshow(image_grid)
plt.title('Color SOM')
for i, m in enumerate(mapped):
    plt.text(m[1],
             m[0],
             color_names[i],
             ha='center',
             va='center',
             bbox=dict(facecolor='white', alpha=0.5, lw=0))
plt.show()
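
Several listings here share one interface: a constructor SOM(m, n, dim, n_iterations) plus train(), map_vects(), and get_centroids(). For reference, a minimal NumPy sketch of that interface — an illustrative assumption, not the som.py these projects actually import:

import numpy as np

class SOM:
    # Minimal sketch: rectangular grid, linearly decaying rate and radius.
    def __init__(self, m, n, dim, n_iterations=100, alpha=0.3):
        self.m, self.n, self.dim = m, n, dim
        self.n_iterations = n_iterations
        self.alpha = alpha
        self.weights = np.random.rand(m * n, dim)   # one weight vector per node
        self.locations = np.array([(i, j) for i in range(m) for j in range(n)])

    def _bmu(self, x):
        # index of the best matching unit for input vector x
        return np.argmin(np.linalg.norm(self.weights - x, axis=1))

    def train(self, data):
        sigma0 = max(self.m, self.n) / 2.0
        for t in range(self.n_iterations):
            frac = 1.0 - t / float(self.n_iterations)   # decays toward 0
            lr, sigma = self.alpha * frac, sigma0 * frac + 1e-9
            for x in data:
                d2 = np.sum((self.locations - self.locations[self._bmu(x)]) ** 2, axis=1)
                h = np.exp(-d2 / (2 * sigma ** 2))       # Gaussian neighborhood
                self.weights += lr * h[:, None] * (x - self.weights)

    def get_centroids(self):
        # weights as an (m, n, dim) grid, e.g. for plt.imshow
        return self.weights.reshape(self.m, self.n, self.dim)

    def map_vects(self, data):
        # (row, col) of the BMU for every input vector
        return [self.locations[self._bmu(x)] for x in data]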
Code Example #7
sig = sig[:,[0,1,2,3,4,5,6,7]] # cuts the signal array to only include variables you want to train on
### PREPARE THE BACKGROUND ARRAY ###
bg = np.loadtxt('eta_mc_som_background_feb2019.txt') # convert to np array
np.random.shuffle(bg)
bg = bg[:2000,:]
bg2 = bg
backupbg = bg2
#bg = bg[:10000,:] # cut the np array to preferred size
flag_bg = bg[:,8] # array of zeros for background, flags each event as background 
bg = bg[:,[0,1,2,3,4,5,6,7]] # cuts the background array to only include variables you want to train on
# finish preparing the final dataset (one big numpy array)
data = np.concatenate((sig,bg),axis=0) # concatenate the sig and bg numpy arrays into one big array
flags = np.concatenate((flag_sig, flag_bg), axis=0) # concatenates the flag arrays into one array, each entry corresponding to the entry of the data array
### TRAINING ###
#som = SOM(dimx, dimy, 8, 400) # Train a dimx X dimy SOM with 400 iterations, 8 is the number of variables in the data array
som = SOM(dimx, dimy, 8)
som.train(data) # trains on the dataset prepared
### THIS WILL TAKE AWHILE ###
### TO STORE THE TRAINING RESULTS INTERACTIVELY IN ipython DO: 
### weightages = som._weightages 
### %store weightages 
### THEN TO RECOVER DO: 
### %store -r 
### som = SOM(dimx, dimy, 8, 400)
### som._weightages = weightages
### som._trained = True
print(str(datetime.datetime.now())) # print the time to observe how long the training will take
mapped = np.array(som.map_vects(data)) # map each datapoint to its nearest neuron
# post training manipulations to the final dataset (one big numpy array)
data = np.append(data,mapped,axis=1) # append the mapped neurons to data
data = np.column_stack((data,flags)) # append the flags to the dataset 
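
A lighter-weight alternative to the %store recipe in the comments above is plain NumPy serialization; a sketch, assuming som._weightages is array-like as that recipe implies:

np.save('som_weightages.npy', som._weightages)  # persist after training

# later, recover without retraining:
som = SOM(dimx, dimy, 8)
som._weightages = np.load('som_weightages.npy')
som._trained = True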
Code Example #8
import matplotlib.pyplot as plt
from som import SOM

inputs = [[1, 4], [6, 2], [1, 3], [5, 1], [4, 0], [0, 4]]

weights = [[5, 0], [-1, 4]]

sigma = 1

learning_rate = 1

sigma_decrease_rate = 0

som = SOM(inputs,
          weights,
          sigma=sigma,
          learning_rate=learning_rate,
          sigma_decrease_rate=sigma_decrease_rate,
          epochs=10)
som.train(ignore_hk=True, display=True)
points = [[1, 4], [6, 2], [1, 3], [5, 1], [4, 0], [0, 4], [3, 4]]

predictions = som.predict(points, ignore_hk=True, display=True)

p, m = som.prediction_line(display=True)


def med(points, p, m):
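    # Return the y-values of the line through (p[0], p[1]) with slope m,
    # evaluated at the given x-values.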
    y = []
    for point in points:
        y.append(m * (point - p[0]) + p[1])
    return y
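
One hypothetical way to visualize the line med describes against the data (matplotlib is already imported above):

xs = [pt[0] for pt in points]
plt.scatter(xs, [pt[1] for pt in points])
plt.plot(xs, med(xs, p, m), 'k--')  # the line through p with slope m
plt.show()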
Code Example #9
def SelfOrganizingMap(Feature_List, where):
    if not os.path.exists(where):
        os.makedirs(where)

    som = SOM(SOM_LINES, SOM_COLS, len(Feature_List[0]), NUM_ITERATIONS)
    som.train(Feature_List)
    # Get output grid
    image_grid = som.get_centroids()
    #print 'Image grid shows weights for all the input features'
    #print image_grid
    # Get vector mappings
    mapped = som.map_vects(Feature_List)
    #print 'Mapping', mapped

    # Visualization part
    # Needs to be refactored to produce output for any number of clusters
    avg_line = [0] * NUM_CLUST
    avg_column = [0] * NUM_CLUST

    for i in range(0, len(mapped), NUM_CLUST):
        for j in range(NUM_CLUST):
            avg_line[(i + j) % NUM_CLUST] += mapped[i + j][0]
            avg_column[(i + j) % NUM_CLUST] += mapped[i + j][1]

    # Integer division (as in the original Python 2 code) keeps the
    # averages usable as grid indices below.
    for i in range(len(avg_line)):
        avg_line[i] = avg_line[i] // (len(mapped) // NUM_CLUST)

    for i in range(len(avg_column)):
        avg_column[i] = avg_column[i] // (len(mapped) // NUM_CLUST)

    print('Average location')
    print(avg_line, avg_column)

    # Grayscale landscape
    sorting_landscape = []
    for i in range(NUM_CLUST):
        sorting_landscape.append(np.zeros((SOM_LINES, SOM_COLS), np.float16))

    for i in range(len(mapped)):
        lin = mapped[i][0]
        col = mapped[i][1]
        sorting_landscape[i % NUM_CLUST][lin][col] += 0.05
        if sorting_landscape[i % NUM_CLUST][lin][col] > 1.0:
            sorting_landscape[i % NUM_CLUST][lin][col] = 1.0

    for i in range(NUM_CLUST):
        sorting_landscape[i][avg_line[i]][avg_column[i]] = 1.0

    for i in range(NUM_CLUST):
        io.imsave(where + str(i) + '_sorting_cluster.png',
                  sorting_landscape[i])

    # Colored landscape
    colored_landscape = np.zeros((SOM_LINES, SOM_COLS, 3), np.float16)
    white = np.array([1.0, 1.0, 1.0])

    # sorting
    insertion_color = np.array([0.0, 0.0, 1.0])  # blue
    bubble_color = np.array([0.0, 1.0, 0.0])  # green
    heap_color = np.array([1.0, 0.0, 0.0])  # red
    quick_color = np.array([1.0, 0.0, 1.0])  # magenta
    random_color = np.array([1.0, 1.0, 0.0])  # yellow

    # non-sorting
    reverseSorted_color = np.array([0.0, 1.0, 1.0])  # cyan
    intervalSwapSorted_color = np.array([1.0, 0.5, 0.0])  # orange
    reverse_color = np.array([0.5, 1.0, 0.0])  # lime-green
    intervalSwap_color = np.array([1.0, 0.0, 0.5])  # fuchsia

    colored = [
        insertion_color, bubble_color, heap_color, quick_color, random_color
    ]
    #add_colored = [reverseSorted_color, intervalSwapSorted_color, reverse_color, intervalSwap_color]
    #colored.extend(add_colored)

    grad = 0.005
    for i in range(len(mapped)):
        lin = mapped[i][0]
        col = mapped[i][1]
        colored_landscape[lin][col] += grad * colored[i % NUM_CLUST]
        #if colored_landscape[lin][col].any() > 1.0:
        #	colored_landscape[lin][col] -= 0.05*colored[i%NUM_CLUST]

    for i in range(NUM_CLUST):
        colored_landscape[avg_line[i]][
            avg_column[i]] = 0.3 * white + 0.7 * colored[i]

    io.imsave(where + 'all_sorting_clusters.png', colored_landscape)
Code Example #10
# Training samples: vectors representing RGB colors
training_samples = np.array([[0., 0., 0.], [0., 0., 1.], [0., 0., 0.5],
                             [0.125, 0.529, 1.0], [0.33, 0.4, 0.67],
                             [0.6, 0.5, 1.0], [0., 1., 0.], [1., 0., 0.],
                             [0., 1., 1.], [1., 0., 1.], [1., 1., 0.],
                             [1., 1., 1.], [.33, .33, .33], [.5, .5, .5],
                             [.66, .66, .66]])

# The colors' corresponding names, used to label the plot
color_names = \
    ['black', 'blue', 'darkblue', 'skyblue',
     'greyblue', 'lilac', 'green', 'red',
     'cyan', 'violet', 'yellow', 'white',
     'darkgrey', 'mediumgrey', 'lightgrey']

som = SOM(60, 100, 3)
start = time.time()
scaling_factor = 1e2
som.train(training_samples, 400, 0.8, scaling_factor, 20, scaling_factor)
end = time.time()
total_time = end - start
print("Elapsed time: {}".format(total_time))
pl.imshow(som.som, origin='lower')

#label each weight with its corresponding name
for i in range(len(training_samples)):
    bmu = som.get_bmu(training_samples[i])
    pl.text(bmu[1],
            bmu[0],
            color_names[i],
            ha='center',
            va='center')
pl.show()
Code Example #11
    return X, Y


if __name__ == "__main__":
    with open('wine.data', 'r', encoding='utf-8') as f:
        data = f.read()
    data = data.split('\n')[:-1]
    X, Y = make_XY(data)
    X = feature_normalization(X)
    # exit()
    FEATURE_COUNT = len(X[0])
    CLASS_COUNT = len(list(set(Y)))
    print('FEATURE_COUNT:%d' % FEATURE_COUNT)
    print('CLASS_COUNT:%d' % CLASS_COUNT)

    som = SOM(8, 8)  # initialize the SOM
    som.fit(X, 10000, save_e=True, interval=100
            )  # fit the SOM for 10000 epochs, save the error every 100 steps
    som.plot_error_history(
        filename='images/som_error.png')  # plot the training error history

    # now visualize the learned representation with the class labels
    som.plot_point_map(X,
                       Y, ['Class %d' % (l + 1) for l in range(CLASS_COUNT)],
                       filename='images/som.png')
    for i in range(CLASS_COUNT):
        som.plot_class_density(X,
                               Y,
                               t=i,
                               name='Class %d' % (i + 1),
                               filename='images/class_%d.png' % (i + 1))
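
make_XY and feature_normalization are defined earlier in the original file. A plausible sketch of both for the UCI wine.data format (class label first, 13 numeric features per comma-separated line) — an assumption, not the original code:

import numpy as np

def make_XY(rows):
    # wine.data lines look like '1,14.23,1.71,...': label first, features after
    Y = np.array([int(r.split(',')[0]) - 1 for r in rows])   # labels 1..3 -> 0..2
    X = np.array([[float(v) for v in r.split(',')[1:]] for r in rows])
    return X, Y

def feature_normalization(X):
    # column-wise min-max scaling to [0, 1]
    mn, mx = X.min(axis=0), X.max(axis=0)
    return (X - mn) / (mx - mn)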
Code Example #12
sig = sig[:10000,:] # cut the np array to preferred size
flag_sig = sig[:,35] # array of ones for signal, flags each event as signal
sig = sig[:,[24,25,26,29,30,31,32,34]] # cuts the signal array to only include variables you want to train on
### PREPARE THE BACKGROUND ARRAY ###
bg = np.array(bgroot.tolist()) # convert to np array
np.random.shuffle(bg)
bg2 = bg
backupbg = bg2
bg = bg[:10000,:] # cut the np array to preferred size
flag_bg = bg[:,35] # array of zeros for background, flags each event as background 
bg = bg[:,[24,25,26,29,30,31,32,34]] # cuts the background array to only include variables you want to train on
# finish preparing the final dataset (one big numpy array)
data = np.concatenate((sig,bg),axis=0) # concatenate the sig and bg numpy arrays into one big array
flags = np.concatenate((flag_sig, flag_bg), axis=0) # concatenates the flag arrays into one array, each entry corresponding to the entry of the data array
### TRAINING ###
som = SOM(dimx, dimy, 8, 400) # Train a dimx X dimy SOM with 400 iterations, 8 is the number of variables in the data array
som.train(data) # trains on the dataset prepared
### THIS WILL TAKE AWHILE ###
### TO STORE THE TRAINING RESULTS INTERACTIVELY IN ipython DO: 
### weightages = som._weightages 
### %store weightages 
### THEN TO RECOVER DO: 
### %store -r 
### som = SOM(dimx, dimy, 8, 400)
### som._weightages = weightages
### som._trained = True
print(str(datetime.datetime.now())) # print the time to observe how long the training will take
mapped = np.array(som.map_vects(data)) # map each datapoint to its nearest neuron
# post training manipulations to the final dataset (one big numpy array)
data = np.append(data,mapped,axis=1) # append the mapped neurons to data
data = np.column_stack((data,flags)) # append the flags to the dataset 
Code Example #13
    colors = np.append(
        colors,
        np.array([[random.random(),
                   random.random(),
                   random.random()]]),
        axis=0)
#array to tensor
data = torch.Tensor(colors)

#hyper parameters
row = 40
col = 40
total_epoch = 1000

#som
som = SOM(3, (row, col))
for iter_no in range(total_epoch):
    som.self_organizing(data, iter_no, total_epoch)

#get weights (map)
weight = som.weight.reshape(3, row, col).numpy()
weight = np.transpose(weight, (1, 2, 0))  # (C, H, W) -> (H, W, C) for plt.imshow

#plot
plt.title('Color SOM')
plt.imshow(weight)
plt.show()
Code Example #14
from som import SOM

s = SOM()

# test of the Euclidean distance method
d = s.distancia([2, 1, 1], [4, 2, 3])
print(d, ' = 3')

posM = s.melhorNeuronio([17, 13, 22])
print(posM)
print(s.matrizNeuronios[posM[0]][posM[1]])

r = s.melhorReposta([17, 13, 22])
print('movimento: ', r)
Code Example #15
      [1., 0., 0.],
      [0., 1., 1.],
      [1., 0., 1.],
      [1., 1., 0.],
      [1., 1., 1.],
      [.33, .33, .33],
      [.5, .5, .5],
      [.66, .66, .66]])
color_names = \
    ['black', 'blue', 'darkblue', 'skyblue',
     'greyblue', 'lilac', 'green', 'red',
     'cyan', 'violet', 'yellow', 'white',
     'darkgrey', 'mediumgrey', 'lightgrey']
 
#Train a 20x30 SOM with 400 iterations
som = SOM(20, 30, 3, 400)
som.train(colors)
 
#Get output grid
image_grid = som.get_centroids()
 
#Map colours to their closest neurons
#mapped = som.map_vects(colors)
 
#Plot
plt.imshow(image_grid)
plt.title('Color SOM')
'''
for i, m in enumerate(mapped):
    plt.text(m[1], m[0], color_names[i], ha='center', va='center',
             bbox=dict(facecolor='white', alpha=0.5, lw=0))
'''
plt.show()
Code Example #16
def main():

    st.set_option('deprecation.showfileUploaderEncoding', False)

    activities = ['Description', 'Explore Data', 'EDA', 'Model Selection']

    choice = st.sidebar.selectbox('Select Activity', activities)

    html_temp = """
    <div style="background-color:tomato;padding:10px">
    <h2 style="color:white;text-align:center;">Parkinson Disease Classification</h2>
    </div>
    """

    st.markdown(html_temp, unsafe_allow_html=True)
    st.markdown("""<br><span>Made by </span><span style='color: #FF0000;'>Suraj Patil</span>""", unsafe_allow_html=True)

    # dataset = st.file_uploader('Upload Dataset', type= 'csv')
    # if dataset is not None:
    dataset = pd.read_csv('pd_speech_features.csv')

    if choice == 'Explore Data':
        exploreData(dataset)

    elif choice == 'EDA':
        eda(dataset)


    elif choice == 'Model Selection':
        features, target = divide(dataset)
        if st.sidebar.checkbox('Features Selection'):
        # select = st.sidebar.radio('Select Methods', ('SelectKBest', 'Standardization'))
        # if select == 'SelectKBest':
            st.write('Features Selection using SelectKBest')
            k = st.sidebar.slider('Set number of top features to select', min_value=5, max_value=50)
            st.sidebar.write('you selected', k)
            features = selectkBest(k, features, target)

        if st.sidebar.checkbox('Standardization'):
        # if select == 'Standardization':
            st.write('Standardization')
            st.write(features.shape)
            features = Standardscaler(features)
            st.write(features)

        X_train, X_test, y_train, y_test = train_split_data(features, target)


        # st.subheader('Model Building')
        model = st.sidebar.selectbox('Select ML Algorithms', ['MultiLayer Perceptron', 'Support Vector Machine', 'Self Organizing Maps', 'Learning Vector Quantization'])            
        if model == 'MultiLayer Perceptron':
            st.subheader('MultiLayer Perceptron')
            st.sidebar.subheader("Model Hyperparameters")
            hidden_layers = st.sidebar.slider('Hidden Layers', min_value=100, max_value=500, step=100)
            max_iter = st.sidebar.slider('No. of Iterations', min_value=100, max_value=1000, step=100)
            activation_func = st.sidebar.selectbox('Select Activation Function', ['identity', 'logistic', 'relu', 'tanh'])
            solver = st.sidebar.selectbox('Select Solver', ['adam', 'sgd'])
            if st.sidebar.button('Apply', 'mlp'):
                MLP(X_train, X_test, y_train, y_test, hidden_layers, activation_func, solver, max_iter)

        elif model == 'Support Vector Machine':
            st.subheader('Support Vector Machine')
            st.sidebar.subheader("Model Hyperparameters")
            start = st.sidebar.number_input('From', min_value=1.0, max_value=1000.0, step=1.0)
            end = st.sidebar.number_input('To', min_value=1.0, max_value=1000.0, step=1.0)
            C = np.arange(start, end+1)
            kernel = st.sidebar.multiselect('Select Kernels', ['linear', 'poly', 'rbf', 'sigmoid'])
            if st.sidebar.button('Apply', 'svm'):
                SVM(X_train, X_test, y_train, y_test, C, kernel)

        elif model == 'Self Organizing Maps':
            st.subheader('Self Organizing Maps')
            st.sidebar.subheader("Model Hyperparameters")
            epoch = st.sidebar.slider('Set epoch', min_value=50.0, max_value=1500.0, step=50.0)
            neighbor_fun = st.sidebar.selectbox('Select Neighborhood Function', ['gaussian', 'triangle'])
            if st.sidebar.button('Apply', 'som'):
                SOM(X_train, X_test, y_train, y_test, k, epoch, neighbor_fun)

        else:
            st.subheader('Learning Vector Quantization')
            epoch = st.sidebar.slider('Set epoch', min_value=50.0, max_value=1500.0, step=50.0)
            learn_rate = st.sidebar.number_input('Set Learning Rate', min_value=0.1, max_value=1.1, step=0.1)
            if st.sidebar.button('Apply', 'lvq'):
                LVQ(X_train, X_test, y_train, y_test, epoch, learn_rate)

    # else:
    #     st.write('Upload dataset first!!!')

    else:
        st.markdown(describe(), unsafe_allow_html=True)
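
The excerpt ends without the usual entry point; presumably the original script closes with the standard guard (an assumption):

if __name__ == '__main__':
    main()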
Code Example #17
import numpy as np
import pandas as pd
from som import SOM
from sklearn.datasets import load_boston
from sklearn.preprocessing import StandardScaler

# read in data
boston = load_boston().data
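# (load_boston was removed in scikit-learn 1.2; this snippet targets an older version)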
# standard scale data
ss = StandardScaler()
X = ss.fit_transform(boston)

# instantiate SOM
som = SOM(X, 3, 3, 1, 100, 0.01)
# train model
tree, weights, winners, distances = som.train(X)
# print data
# print(weights)
print(set(winners))


dist, ind = som.predict(X, tree)
print(set([x[0] for x in ind]))
Code Example #18
File: peptides.py Project: Wddzht/SelfOrganizingMap
# generate some virtual peptide sequences
libnum = 1000  # 1000 sequences per sublibrary
h = Helices(seqnum=libnum)
r = Random(seqnum=libnum)
n = AMPngrams(seqnum=libnum, n_min=4)
h.generate_sequences()
r.generate_sequences(proba='AMP')
n.generate_sequences()

# calculate molecular descriptors for the peptides
d = PeptideDescriptor(seqs=np.hstack((h.sequences, r.sequences, n.sequences)), scalename='pepcats')
d.calculate_crosscorr(window=7)

# train a som on the descriptors and print / plot the training error
som = SOM(x=12, y=12)
som.fit(data=d.descriptor, epochs=100000, decay='hill')
print("Fit error: %.4f" % som.error)
som.plot_error_history(filename="som_error.png")

# load known antimicrobial peptides (AMPs) and transmembrane sequences
dataset = load_AMPvsTM()
d2 = PeptideDescriptor(dataset.sequences, 'pepcats')
d2.calculate_crosscorr(7)
targets = np.array(libnum*[0] + libnum*[1] + libnum*[2] + 206*[3])
names = ['Helices', 'Random', 'nGrams', 'AMP']

# plot som maps with location of AMPs
som.plot_point_map(np.vstack((d.descriptor, d2.descriptor[206:])), targets, names, filename="peptidesom.png")
som.plot_density_map(np.vstack((d.descriptor, d2.descriptor)), filename="density.png")
som.plot_distance_map(colormap='Reds', filename="distances.png")
Code Example #19
File: test.py Project: yuzhao4/som_project
import numpy as np
from som import SOM

# generate some random data with 36 features
data1 = np.random.normal(loc=-.25, scale=0.5, size=(500, 36))
data2 = np.random.normal(loc=.25, scale=0.5, size=(500, 36))
data = np.vstack((data1, data2))

som = SOM(10, 10)  # initialize the SOM
som.fit(data, 2000)  # fit the SOM for 2000 epochs

targets = 500 * [0] + 500 * [1]  # create some dummy target values

# now visualize the learned representation with the class labels
som.plot_point_map(data, targets, ['class 1', 'class 2'], filename='som.png')
som.plot_class_density(data,
                       targets,
                       1, ['class 1', 'class 2'],
                       filename='class_0.png')
Code Example #20
# For plotting the images

from __future__ import absolute_import
from matplotlib import pyplot as plt
import numpy as np
from som import SOM

colors = np.array([[0., 0., 1.], [0., 0., 0.95], [0., 0.05, 1.], [0., 1., 0.],
                   [0., 0.95, 0.], [0., 1, 0.05], [1., 0., 0.], [1., 0.05, 0.],
                   [1., 0., 0.05], [1., 1., 0.]])

som = SOM(4, 4, 3)
som.train(colors)

plt.imshow(som.centroid_grid)
plt.show()
Code Example #21
data0 = json.load(f)
f.close()
f = open('appdata.json', 'r')
applist = json.load(f)
applist = applist['applist']['apps']
f.close()
f = open('tagDATA2.json', 'r')
tag = json.load(f)
f.close()

data2 = []
for i in data0:
    data2.append(data0[i]['gameplay'])
data = np.array(data2)
# Train a 5x2 SOM with 100 iterations (10 features per gameplay vector)
som = SOM(5, 2, 10, 100)  # my parameters
som.train(data)
cnn0 = load_model('model0.h5')
cnn1 = load_model('model1.h5')
cnn2 = load_model('model2.h5')
cnn3 = load_model('model3.h5')
cnn4 = load_model('model4.h5')
cnn5 = load_model('model5.h5')
cnn6 = load_model('model6.h5')
cnn7 = load_model('model7.h5')
cnn8 = load_model('model8.h5')
cnn9 = load_model('model9.h5')


def gameRecommendation(id):
    id = str(id)
Code Example #22
    def __init__(self,
                 act=None,
                 pool=None,
                 with_memory=True,
                 summ=None,
                 residual=True,
                 log=False,
                 name="model"):
        super(Model, self).__init__(name=name)

        self._with_memory = with_memory
        self._summ = summ
        self._residual = residual
        self._num_blocks = 6

        self._log = log

        with self._enter_variable_scope():
            self._act = Activation(act, verbose=True)
            self._pool = Pooling(pool, padding='VALID', verbose=True)

            if self._residual:
                self._convs = [
                    snt.Conv2D(eval("FLAGS.num_outputs_block_%d" % (i + 1)),
                               FLAGS.filter_size,
                               padding=snt.VALID,
                               use_bias=False) for i in range(self._num_blocks)
                ]

                self._sepconvs = [
                    snt.SeparableConv2D(
                        eval("FLAGS.num_outputs_block_%d" % (i + 1)),
                        1,
                        FLAGS.filter_size,
                        padding=snt.SAME,
                        use_bias=False) for i in range(self._num_blocks)
                ]
            else:
                self._sepconvs = [
                    snt.SeparableConv2D(
                        eval("FLAGS.num_outputs_block_%d" % (i + 1)),
                        1,
                        FLAGS.filter_size,
                        padding=snt.VALID,
                        use_bias=False) for i in range(self._num_blocks)
                ]

            self._seq = snt.Sequential([
                snt.Linear(output_size=FLAGS.num_outputs_dense), tf.nn.relu,
                snt.Linear(output_size=FLAGS.num_classes)
            ])

            if self._with_memory:
                print("Model with memory enabled")

                config = \
                {
                    "height": FLAGS.memory_height,
                    "width": FLAGS.memory_width,
                    "input_size": 32, # very dangeous, hard-coded
                    "num_iters": FLAGS.num_iterations,
                    "learning_rate": FLAGS.lr_som
                }

                self._som = SOM(**config)
Code Example #23
                                     drop_last=False,  # test data differs slightly across folds; that's fine, though without the model the same data is used anyway
                                     worker_init_fn=worker_init_fn)  # if worker_init_fn is None, torch.initial_seed() is used
        test=[]
        test_lab=[]
        test_lab_list=[]
        for b in test_dataloader:
            test.extend(b['mol_fp'])
            test_lab_list.extend(b['labels'])  # TODO: consider multi-label support in the future
        for b in test_lab_list:
            test_lab.extend(b)
        #print(f'len(test)={len(test)},test[0]={len(test[1])},{test[1]}')
        #print(f'len(test_lab)={len(test_lab)},test[0]={len(test_lab[1])},{test_lab[1]}')

        # TODO: add train and predict functions; merge the nepy sofm code with this module
        input_dim=args.input_dim
        som = SOM(input_size=input_dim, out_size=args.map)
        som = som.to(device)
        if train:
            losses = list()
            for epoch in tqdm(range(total_epoch)):
                running_loss = 0
                start_time = time.time()
                for idx, b in enumerate(train_dataloader):
                    X=torch.FloatTensor(b['mol_fp'])
                    #print(f'X.size{X.size()}')
                    X = X.view(-1, input_dim).to(device)    # flatten
                    loss = som.self_organizing(X, epoch, total_epoch)    # train som
                    running_loss += loss
                losses.append(running_loss)
                print('epoch = %d, loss = %.2f, time = %.2fs' % (epoch + 1, running_loss, time.time() - start_time))
                if epoch % 100 == 0:
Code Example #24
File: detector.py Project: edouardfouche/NNOHD
                rbmfile.write('{};{};{}\n'.format(
                        counter, 
                        rbm.labels[counter],
                        value))
                counter += 1
            print('Saved as rbm_{}epochs_{}hidden_{}.csv\n'.format(
                    args.rbmepochs,
                    args.rbmhiddenneurons,
                    filename))

elif args.method in ('som', 'all'):
    from som import SOM
    
    som = SOM(filename,
              pathname,
              args.somepochs,
              args.somgrid,
              args.somgrid,
              args.delimiter)
    
    print('----------')
    print('SOM results on data set {}:'.format(filename))
    print('----------')
    print('Number of dimensions: {}'.format(som.dimensions_count))
    print('Number of data objects: {}'.format(som.objects_count))
    print('Number of outliers: {}'.format(som.outlier_count))
    print('----------')
    print('ROC AUC: {}'.format(som.roc_auc))
    print('PR AUC: {}'.format(som.pr_auc))
    print('F1 Score: {}'.format(som.f1_score))
    print('----------\n')
    if save_rankings:
Code Example #25
    inputFilename = 'myo_raw_data.csv'
    header = '% qx, qy, qz, qw, ax, ay, az, gx, gy, gz\n'

    try:
        f = open(inputFilename, 'r')
    except Exception as error:
        print("ERROR: Couldn't read from {}".format(inputFilename))
    else:
        with f:
            reader = csv.reader(f)
            for row in reader:
                if row[0][0] != '%':
                    myo_data.append(row)
            print(myo_data[0])

    #Training inputs for Myo data (quat, accel, gyro); CSV fields are strings, so convert to float
    som_data = np.array(myo_data, dtype=float)

    #Train a SOM_X x SOM_Y SOM for SOM_ITER iterations
    som = SOM(SOM_X, SOM_Y, MYO_DATA_DIM, SOM_ITER)
    som.train(som_data)

    #Get output grid
    image_grid = som.get_centroids()

    #Plot
    plt.imshow(image_grid)
    plt.title('Myo SOM')
    plt.show()
Code Example #26
File: main.py Project: ShawnWongMILab/SOM-LVQ
    for col in range(1, num_cols):
        col_values.append(input_sheet.col_values(col)[1:])
    x = np.array(col_values, dtype='|S4')
    y = x.astype(float)  # np.float is removed in modern NumPy
    maxs = [max(y[col]) for col in range(0, num_cols - 1)]
    mins = [min(y[col]) for col in range(0, num_cols - 1)]
    data_points = []
    for row in range(1, num_rows):
        values = []
        for col in range(1, num_cols):
            values.append(
                (float(input_sheet.cell(row, col).value) - mins[col - 1]) /
                (maxs[col - 1] - mins[col - 1]))
        d = DataPoint(values, int(output_sheet.cell(row, 0).value))
        data_points.append(d)
    print(num_rows - 1, " points with dimension=", num_cols - 1, " are added")
    return data_points


data_points = load_data("722.xlsx")
s = SOM(2, 8, 27)
s.load_input_data(data_points)
s.fit(2, 0.1)

v = LVQ(2, 6, 5)
v.load_data(data_points)
v.train(5, 0.01, 2)

s.predict(data_points)
v.predict(data_points)
Code Example #27
File: test.py Project: Battleroid/som-python
next(r)  # skip header
data = []

# parse data
for row in r:
    l = []
    n = row[0]
    v = [float(x) for x in row[1:]]
    mx = max(v)
    v = [x / mx for x in v]  # normalize by the row maximum
    l.append(n)
    for x in v:
        l.append(x)
    data.append(l)

# create SOM & train
s = SOM()
results = s.train(data)

# restart if only one node (growth threshold not triggered)
while len(results) == 1:
    print('Only one node, restarting...')
    s = SOM()
    results = s.train(data)

print """
Parameters summary:
    Epochs: {epochs}
    Cluster Threshold: >={threshold}
    Radius: {radius}
    Radius Decay Rate: {radius_decay}
    Rate: {rate}
Code Example #28
projection = {
    '_id': False,
    'title': True,
    'runtime': True,
    'metacritic': True,
    'tomato.rating': True,
    'year': True,
    'awards.wins': True
}

brute_data = conn.find_docs(collec='movieDetails', f=filtro, p=projection)
print("...Recuperados datos en bruto...")

# Clean and standardize the data
clean_data = cleaner(brute_data)
data = [list(map(standarize, lst)) for lst in clean_data]

print("...Creando una SOM...")
topo = len(data[0])
red = SOM(100, 100, topo)

print("...Entrenando la red...")
red.train(clean_data, L0=0.8, lam=1e2, sigma0=10)
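# Note: the standardized 'data' computed above is not used here; training runs on clean_data.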

print("...Construyendo gráfico en 2D...")
for p in red.som:
    build_JSON_coor(p, 'son_map')

print("...Calculando el error de cálculo...")
print(red.quant_err())
Code Example #29
from som import SOM 
from hcsr04 import HCSR04
from servo_motor import Servo
from rodas import Rodas
import time 

rodas = Rodas() 

sf = Servo(14)
som = SOM() 
sensorD = HCSR04(trigger_pin=12, echo_pin=13)


amostra = [0,0,0]

# Angles for the positions: right, front, and left
angles = [10,90,170]

for i in range(10): 
    contI = 2
    for a in angles:
        sf.setAngle(a)
        time.sleep_ms(500)
        d = sensorD.distance_cm() 
        if d > 70: 
            d = 70
        amostra[contI] = d 
        time.sleep_ms(10)
        contI -= 1 
    print('Amostra:', amostra)
    acao = som.melhorReposta(amostra)
Code Example #30
from som import SOM
import pandas as pd
import numpy as np

input_data = pd.read_csv("C:/Users/KHT/Desktop/hw2out.csv")

som_net = SOM(10, 10, 2)

coor_data = input_data.iloc[np.random.permutation(len(input_data))]
trunc_data = coor_data[["x", "y"]]

print(trunc_data.values)
# print(trunc_data.values.shape[0])

som_net.train(trunc_data.values, num_epochs=1000, init_learning_rate=0.1)
"""
def predict(df):
    bmu, bmu_idx = som_net.find_bmu(df.values)
    df['bmu'] = bmu
    df['bmu_idx'] = bmu_idx
    return df


clustered_df = trunc_data.apply(predict, axis=1)
result_array = clustered_df.to_records().tolist()

print("ID number {0}".format(result_array[0][0]))
print("X Coordinate on topological map {0}".format(result_array[0][4][0]))
print("Y Coordinate on topological map {0}".format(result_array[0][4][1]))
"""