Code example #1
def mainOld():
    """Old main function running files from our simulations."""

    # Normal and DDoS traffic captured over a long period of time.
    # Raw strings keep the Windows path backslashes intact.
    mixedLarge = r"traffic-samples\mixed-traffic-sample\MixedDDOS"
    mixedLarge_1 = r"traffic-samples\mixed-traffic-sample\MixedDDOS1"
    mixedLarge_2 = r"traffic-samples\mixed-traffic-sample\MixedDDOS2"

    # Normal and DDoS traffic captured over a short period of time.
    mixedQuick = r"traffic-samples\mixed-traffic-sample\MixedQuick"
    mixedQuick1 = r"traffic-samples\mixed-traffic-sample\MixedQuick1"
    mixedQuick2 = r"traffic-samples\mixed-traffic-sample\MixedQuick2"
    mixedQuick3 = r"traffic-samples\mixed-traffic-sample\MixedQuick3"
    mixedQuick4 = r"traffic-samples\mixed-traffic-sample\MixedQuick4"
    mixedQuick5 = r"traffic-samples\mixed-traffic-sample\MixedQuick5"
    mixedQuick6 = r"traffic-samples\mixed-traffic-sample\MixedQuick6"
    mixedQuick7 = r"traffic-samples\mixed-traffic-sample\MixedQuick7"

    pcapPar = pcapParserOld(mixedQuick, mixedQuick1)

    print("File read in. Training.")
    pcap_tensors = pcapPar.pcap_list()

    # Set up the SOM
    som = SOM(n=5, m=5, dim=5, n_iterations=10)
    som.train(pcap_tensors)

    # Output simple color plot
    colors = som.color_inputsOld(pcapPar.pcap_dictionary())
    plt.imshow(colors)
    plt.title('Color SOM')
    plt.show()
Code example #2
def run_mnist(n_train: int = 4000,
              n_test: int = 100,
              visualize: bool = True,
              n_epochs: int = 5,
              l_rate: float = 0.7,
              profile: bool = False):
    Utilities.delete_previous_output("mnist_images")
    mnist_features, mnist_labels, mnist_test_features, mnist_test_labels = DataReader.load_mnist(
        train_limit=n_train, test_limit=n_test)
    som = SOM(mnist=True,
              features=mnist_features,
              labels=mnist_labels,
              test_features=mnist_test_features,
              test_labels=mnist_test_labels,
              n_epochs=n_epochs,
              initial_radius=5,
              initial_l_rate=l_rate,
              radius_decay_func="pow",
              l_rate_decay_func="pow",
              n_output_cols=20,
              n_output_rows=20,
              display_interval=1 if visualize else -1)

    pr = cProfile.Profile()
    if profile:
        pr.enable()

    som.run()

    if profile:
        pr.disable()
        pr.print_stats(sort='time')
    if visualize:
        Utilities.make_gif(mnist=True)
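
A minimal invocation sketch; the parameter values below are illustrative, not taken from the original project:

if __name__ == "__main__":
    # Quick smoke test: small sample counts, few epochs, no visualization.
    run_mnist(n_train=1000, n_test=50, visualize=False, n_epochs=2)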
Code example #3
def Main(plan, names, sigma, sigmaDecay, alphaDecay):

    #samples, names = TestSamples()
    samples = SOMUtils.ExtractSOMSamples(plan, names)
    print "%d samples extracted" % len(samples), names

    scales, mins = ComputeScales(samples)
    dims = len(samples[0][1])

    # Neighborhood distance function.
    distFunc = lambda dsq: m.exp(-0.5 * dsq / (sigma * sigma))
    #som = SOM(CartesianGrid((20, 20)), dims, scales, distFunc)
    som = SOM(HexagonalGrid((20, 20)), dims, scales, distFunc)

    # Train the SOM.
    alpha = 1.0
    for i in range(100):
        if (i + 1) % 10 == 0:
            print("Iteration", i + 1, "alpha =", alpha, "sigma =", sigma)
        for id, sample in samples:
            som.Train(sample, alpha)
        alpha = alpha * alphaDecay
        sigma = sigma * sigmaDecay
        # Rebuild the neighborhood function so it uses the decayed sigma.
        som.DistFunc = lambda dsq: m.exp(-0.5 * dsq / (sigma * sigma))

    SOMUtils.ExportSOM(open("OUT.som", "w"), som, names, samples)
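
The neighborhood kernel above is the standard Gaussian h(d^2) = exp(-d^2 / (2 * sigma^2)); alpha and sigma decay geometrically once per pass over the samples, so the updates shrink in both magnitude and reach as training proceeds.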
Code example #4
File: Execution.py Project: yabernar/FastBMU
 def run(self):
     np.random.seed(self.metadata["seed"])
     if self.data is None:
         self.load_dataset()
     parameters = Parameters({
         "alpha": Variable(start=0.6, end=0.05, nb_steps=self.model["nb_epochs"]),
         "sigma": Variable(start=0.5, end=0.001, nb_steps=self.model["nb_epochs"]),
         "data": self.data,
         "neurons_nbr": (self.model["width"], self.model["height"]),
         "epochs_nbr": self.model["nb_epochs"],
         "topology": self.model["topology"],
         "bmu_search": self.model["bmu_search"]
     })
     if self.model["bmu_search"] == "Parallel":
         self.map = ParallelSOM(parameters)
         self.map.run_parallel()
     else:
         self.map = SOM(parameters)
         self.map.run()
Code example #5
def restoreSOM(checkpoint_dir, lenExamples):
	"""
	Restore the SOM whose trained model is stored in checkpoint_dir.
	dimN, dimM and numIterations are module-level settings.
	"""
	som = SOM(dimN, dimM, lenExamples, checkpoint_dir=checkpoint_dir, n_iterations=numIterations)

	loaded = som.restore_trained()
	if not loaded:
		raise ValueError("SOM in "+checkpoint_dir+" not trained yet")

	return som
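
A hypothetical call site, assuming the same checkpoint-based SOM class used elsewhere on this page (the path and vector length below are illustrative):

som = restoreSOM(checkpoint_dir="./VisualModel10classes/", lenExamples=4096)
mapped = som.map_vects(inputs)  # BMU grid coordinates for a batch of inputs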
Code example #6
def run_tsm(city: int,
            visualize: bool = True,
            profile: bool = False,
            n_epochs: int = 400,
            l_rate: float = 0.39):
    Utilities.delete_previous_output("tsm_images")
    cities = DataReader.read_tsm_file(city)

    # Cities are used as-is; no normalization is applied here.
    norm_cities = cities
    features = norm_cities[:, 1:]

    # TSM Hyper Params
    node_factor = 3
    radius_divisor = 1
    l_decay = "cur"
    r_decay = "pow"

    out_size = len(features) * node_factor
    init_rad = int(out_size / radius_divisor)

    som = SOM(mnist=False,
              features=features,
              n_epochs=n_epochs,
              n_output_rows=1,
              n_output_cols=out_size,
              initial_radius=init_rad,
              initial_l_rate=l_rate,
              radius_decay_func=r_decay,
              l_rate_decay_func=l_decay,
              originals=cities[:, 1:],
              display_interval=10 if visualize else -1)

    pr = cProfile.Profile()
    if profile:
        pr.enable()

    result = som.run()

    if profile:
        pr.disable()
        pr.print_stats(sort='time')

    Utilities.store_tsm_result(case=city,
                               epochs=n_epochs,
                               nodes=node_factor,
                               l_rate=l_rate,
                               radius=radius_divisor,
                               l_decay=l_decay,
                               r_decay=r_decay,
                               result=result)
    if visualize:
        Utilities.make_gif(mnist=False)
    return result
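
Design note: with n_output_rows=1 the map is a single chain of len(features) * node_factor nodes, the usual SOM formulation of the travelling-salesman problem; since radius_divisor is 1, the initial radius spans the whole chain, letting the ring unfold before it locks onto individual cities.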
Code example #7
def get_SOM(v_data):
    # Hyperparameters
    m = 100
    n = 100
    n_char = len(v_data[0])  # Number of features
    lr = 0.3  # Learning rate
    radius = m / 1.5  # Update radius for the weights of nearby neighbors
    num_epoch = 100  # Number of training epochs

    SOM_layer = SOM(m, n, n_char, lr, radius)

    # Returns the resulting weight map and the positions
    return SOM_layer.train(v_data, num_epoch, len(v_data))
Code example #8
    def __init__(self,
                 X,
                 W=None,
                 map_shape=(8, 8),
                 n_clusters=10,
                 init_lr=0.1,
                 init_response=1,
                 max_iter_SOM=10000,
                 max_iter_clus=5000,
                 clus_method="kmeans",
                 normalize_data=False,
                 seed=0):

        # data and SOM map shape
        self.X = X
        if normalize_data:
            self.X = minmax_scale(self.X, axis=0)  # column-wise
        (self.N, self.d) = np.shape(X)
        self.map_shape = map_shape
        self.M = map_shape[0] * map_shape[1]  # number of nodes in the network
        self.W = W  # the weights of the output map

        # hyperparameters
        self.max_iter_SOM = max_iter_SOM
        self.max_iter_clus = max_iter_clus
        self.seed = seed
        self.n_clusters = n_clusters
        self.init_lr = init_lr
        self.init_response = init_response

        # first stage model
        self.model_SOM = SOM(X=self.X,
                             map_shape=self.map_shape,
                             init_lr=self.init_lr,
                             init_response=self.init_response,
                             max_iter=self.max_iter_SOM,
                             seed=self.seed)

        #  second stage model
        self.clus_method = clus_method
        if self.clus_method == "kmeans":
            self.model_clus = KMeans(n_clusters=self.n_clusters,
                                     random_state=self.seed,
                                     algorithm="full",
                                     max_iter=self.max_iter_clus,
                                     n_init=10)
        else:
            self.model_clus = GaussianMixture(n_components=self.n_clusters,
                                              max_iter=self.max_iter_clus,
                                              n_init=10,
                                              init_params="random")
Code example #9
def loadFileSOM(file, classNameIndex):

	som = SOM(N=constants.getN(), maxK=constants.getSOMMaxK(),
		tolerance=constants.getSOMTolerance(), Tdistance=constants.getSOMDistanceT(),
		floatGamma=constants.getSOMGammaK(), alfaInicial=constants.getSOMInitialAlfa(),
		alfaFinal=constants.getSOMFinalAlfa(), variableGamma=False)
	# Initial centers
	som.addInitialCenter([4.6, 3.0, 4.0, 0.0], "Iris-setosa")
	som.addInitialCenter([6.8, 3.4, 4.6, 0.7], "Iris-versicolor")
	fileHelper = FileHelper()

	try:
		f = fileHelper.openReadOnlyFile(file)

		lineas = f.readlines()
		xVector = []

		for linea in lineas:
			# Drop the class-name column and keep the numeric features.
			xVector = linea.strip("\r\n").split(",")
			del xVector[classNameIndex - 1]
			xVector = [float(x) for x in xVector]

			som.addTrainingVector(xVector)

		return som
	except Exception:
		print("Error reading the file")
Code example #10
def main_validation(version):
    '''
    Main to obtain the cluster validation measures (silhouette score and DB index)
    for the implemented clustering methods.
    '''

    # data
    data = pd.read_csv("Data/zipcodedata_KNN_version_" + str(version) + ".csv")
    data_normalised, _, _ = normalise(data)
    X = data_normalised.iloc[:,1:].values  # exclude pc4 variable

    # parameters
    k_range = np.r_[2:21]
    map_shape = (8, 8)

    # measures
    DB_measures = np.zeros((len(k_range), 4))  # rows: k values, columns: models (order: TSC-kmeans, TSC-GMM, kmeans, GMM)
    silhouette_measures = np.zeros((len(k_range), 4))  # rows: k values, columns: models

    # models
    model_SOM = SOM(X=X, map_shape=map_shape)
    model_SOM.train(print_progress=True)
    W = model_SOM.map  # use this to train the kmeans and GMM for the TSC
    for i, k in enumerate(k_range):
        print("CURRENT k = %d" % k)
        models = [TwoStageClustering(X=X, W=W, n_clusters=k, map_shape=map_shape),
                  TwoStageClustering(X=X, W=W, n_clusters=k, clus_method="gmm", map_shape=map_shape),
                  KMeans(n_clusters=k, random_state=0, algorithm="full", max_iter=5000, n_init=10),
                  GaussianMixture(n_components=k, max_iter=5000, n_init=10, init_params="random")]

        for j, model in enumerate(models):
            if j < 2:  # first two models are two-stage models
                model.train(print_progress=False)
            elif j == 2:
                print("Training k-means....")
                t0 = time()
                model.fit(X)
                print("The k-means algorithm took %.3f seconds" % (time()-t0 ))
            elif j == 3:
                print("Training GMM....")
                t0 = time()
                model.fit(X)
                print("The GMM algorithm took %.3f seconds" % (time()-t0 ))

            labels = model.predict(X)
            DB_measures[i,j] = davies_bouldin_score(X, labels)
            silhouette_measures[i, j] = silhouette_score(X, labels)
        print("")

    np.savetxt("Results/DB_measures.txt", DB_measures, delimiter=',')
    np.savetxt("Results/silhouette_measures.txt", silhouette_measures, delimiter=',')
Code example #11
def run_animals_experiment():
    props, names = Utils.load_animals()

    # props = shuffle(props)

    weight_shape = (100, 84)
    epochs = 20
    eta = 0.2

    som = SOM(shape=weight_shape, n_epochs=epochs, eta=eta, neighbors_num=50)
    som.fit(props)

    pred = som.predict(props, names)

    print(pred)
Code example #12
def run_cities_experiment():
    cities_data, cities_labels = Utils.load_cities()

    weight_shape = (10, 2)
    epochs = 20
    eta = 0.2

    som = SOM(shape=weight_shape,
              n_epochs=epochs,
              eta=eta,
              neighbors_num=7,
              neighbohood_function='circular')
    som.fit(cities_data)

    pred = som.predict(cities_data, cities_labels)

    print(pred)

    Utils.plot_cities_tour(cities_data, pred)
Code example #13
def distanceIntraClass(SOM, inputs, nameInputs):
	"""
		calculate the intra-cluster and inter-cluster distances based on a specific som and a set of inputs;
		the clusters are the set of bmus that belong to the same class of objects
	"""
	print('- extraction bmus -')
	mapped = SOM.map_vects(inputs)

	positions = dict()
	for i, m in enumerate(mapped):
		if nameInputs[i] in positions:
			positions[nameInputs[i]].append([m[1],m[0]])
		else:
			positions[nameInputs[i]] = [[m[1],m[0]]]


	distancesIntra = dict()
	print('- intra-cluster distance - ')
	posKey = sorted(positions.keys())
	for c in posKey:
		d = 0
		count = 0
		# Mean distance over all unordered pairs of BMU positions in the class.
		for i1, i in enumerate(positions[c]):
			for j in positions[c][i1 + 1:]:
				d += math.sqrt(((j[0] - i[0])**2) + ((j[1] - i[1])**2))
				count += 1

		# Guard against classes with a single BMU position.
		distancesIntra[c] = d / count if count else 0.0

		print('--- '+c+' -> '+str(distancesIntra[c]))


	allPositions = []
	print('- inter-cluster distance :')
	for c in posKey:
		for p in positions[c]:
			allPositions.append(p)
	d = 0
	count = 0
	for i1, i in enumerate(allPositions):
		for j in allPositions[i1 + 1:]:
			d += math.sqrt(((j[0] - i[0])**2) + ((j[1] - i[1])**2))
			count += 1

	distancesInter = d / count


	print('- ratio between the intra and inter cluster distances')
	for c in posKey:
		print(c + ';' + str(distancesIntra[c] / distancesInter))
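
Both blocks compute the mean pairwise Euclidean distance over n positions, (2 / (n * (n - 1))) * sum_{i<j} ||p_i - p_j||, and the final loop reports each class's intra-cluster mean as a fraction of the global inter-cluster mean.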
Code example #14
def run_mp_votes_experiment():
    votes, mpnames, mpsex, mpdistrict, mpparty, votes_labels = Utils.load_MPs()

    weight_shape = (100, 31)
    epochs = 20
    eta = 0.2

    som = SOM(shape=weight_shape,
              n_epochs=epochs,
              eta=eta,
              neighbors_num=2,
              neighbohood_function='manhattan')
    som.fit(votes)

    pred = som.predict_2_D(votes)

    # print(pred)

    party_names = {
        0: 'No party',
        1: 'M',
        2: 'Fp',
        3: 'S',
        4: 'V',
        5: 'MP',
        6: 'KD',
        7: 'C'
    }
    sex_names = {0: 'Male', 1: 'Female'}
    distr_names = {}
    for i in range(1, 30):
        distr_names[i] = "District: {}".format(i)
    party_votes = Utils.generate_dict(pred, party_names, mpparty)
    sex_votes = Utils.generate_dict(pred, sex_names, mpsex)
    distr_votes = Utils.generate_dict(pred, distr_names, mpdistrict)

    Utils.plot_mp_votes(sex_votes, sex_names, type='sex')
    Utils.plot_mp_votes(party_votes, party_names, type='party')
    Utils.plot_mp_votes(distr_votes, distr_names, isDistrict=True)
Code example #15
def SOM_clustering():    

    splits_count = 4
    selectedCab_index = 1
    selectedCab = []
    
    for q in open('sampled.txt', 'r').read().split():
        selectedCab.append(int(q))  
    
    
    for split in range(1,splits_count+1):
        arr = np.array([])
        print("Split number : %d" % (split))        
        for cabID in selectedCab:
            arrcol =  np.array([])
            #print(cabID)
            filename = "..//Split//" + str(cabID) + "_" + str(split) + "_.traj"
            with open(filename) as f:
                for line in f:
                    line = line.rstrip()
                    arrcol = np.concatenate((arrcol, np.array([int(line)])), axis=0)
            #print(arrcol)
            if len(arr) == 0:
               arr = np.concatenate((arr,arrcol),axis = 0)
            else:
               arr = np.vstack((arr,arrcol))
        testX = arr
        print(testX)
        print ("Cluster Start")
        length = len(arr[0])
        print("Dim : %d"  %(length))
        #Train a 20x30 SOM with 400 iterations
        som = SOM(20, 30, length, 1)
        som.train(testX)
        print ("get_centroids")
        #Get output grid
        image_grid = som.get_centroids()
        #Map colours to their closest neurons
        mapped = som.map_vects(testX)

        print ("Plot")
        array = np.zeros((20,30))
        filename_ClusterLabels = "Cabs_ClusterLabels" + str(split) +".csv"
        target = open(filename_ClusterLabels,'w')
        for i, m in enumerate(mapped):
            # Row-major cell label on the 20x30 grid.
            index = (m[0] * 30) + (m[1] + 1)
            target.write("%s\n" % (index))
            array[m[0], m[1]] += 1
        target.close()
        array = array.astype(int)
        array = np.reshape(array,600)
        print(array)

    return
Code example #16
File: main.py Project: mhh0318/EE7207
def SOM_RBF():
    train = True
    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    data_train = scio.loadmat('data_train.mat')
    data_test = scio.loadmat('data_test.mat')
    data_label = scio.loadmat('label_train.mat')
    X = torch.tensor(data_train['data_train']).to(device)
    train_label = torch.tensor(data_label['label_train']).to(device)
    test_data = torch.tensor((data_test['data_test'])).to(device)
    print('Building SOM model...')
    model = SOM(input_size=33, hidden_size=16, sigma=np.sqrt(81))
    model = model.to(device)
    if train:
        losses = list()
        for epoch in range(1000):
            running_loss = 0

            loss = model.self_organizing(X, epoch)  # so phase
            running_loss += loss

            losses.append(running_loss)
            print('epoch = %d, loss = %.2f' % (epoch + 1, running_loss))
        for epoch in range(30000):
            running_loss = 0

            loss = model.convergence(X)  # conv phase
            running_loss += loss

            losses.append(running_loss)
            print('epoch = %d, loss = %.2f' % (epoch + 1, running_loss))
        train = False
    feedback = model.plot_point_map(X, train_label, ['Class 0', 'Class 1'])
    # feedback appears to be (total, misses_one_labeling, misses_other_labeling);
    # take the better of the two label assignments as the SOM accuracy.
    a, b, c = feedback
    tb = (a - b) / a
    tc = (a - c) / a
    accuracy0 = np.maximum(tb, tc).mean()
    center_vectors = model.weight.T
    print('Building RBF model...')
    RBF_model = RBF(33, center_vectors, 1)
    RBF_model = RBF_model.to(device)
    weight = RBF_model.train(X, train_label)
    predictions = RBF_model.test(X).unsqueeze(1)
    accuracy1 = (predictions == train_label.cpu()).sum() / torch.tensor(
        X.size(0)).float()
    predictions_y = RBF_model.test(test_data)
    print(accuracy1)
    return accuracy0, accuracy1, predictions_y
Code example #17
# -*- coding: utf-8 -*-

__author__ = "alexander"
__date__ = "$05.06.2011 21:30:11$"

from ImageContainer import ImageContainer
from ImageContainer import ImagePattern

from SOM import SOM

if __name__ == "__main__":
    imgContainer = ImageContainer()
    imgContainer.fromDirectory("")
    #imgContainer.analyzeAll()
    som = SOM(3, 5, 5, None)
    som.clustering(imgContainer.images)

    for image in imgContainer.images:
        print(som.coordinates(image))
Code example #19
File: Main.py Project: ogzhnndrms/som
if Path("Test Data/test_file.csv").is_file() is False:
    formatter.format_validation_data()
    formatter.generate_validation_file()

# Train the SOM if there are no saved weights.
if not Path("weights.txt").is_file():

    # Use pandas for loading data using dataframes.
    d = os.path.dirname(os.getcwd())
    file_name = "huge_merged_csv_file.csv"
    data = pd.read_csv(file_name, header=None)
    # Shuffle the data in place.
    data = data.sample(frac=1).reset_index(drop=True)

    # Create the SOM object.
    som = SOM(7, 1)

    # Train the SOM on the shuffled data.
    som.train(data)

    # Get output grid
    grid = som.get_centroids()

    # Save the weights to a file.
    with open("weights.txt", "w") as result_file:
        result_file.write(str(grid))

# Map data to neurons.
mapped_vectors = mapper.map_vects()
tester.open_test_file()
Code example #20
if __name__ == '__main__':
  #read the inputs from the file fInput and show the SOM with the BMUs of each input

  inputs = np.zeros(shape=(N,lenExample))
  nameInputs = list()

  # read the inputs
  with open(fInput, 'r') as inp:
      i = 0
      for line in inp:
        if len(line) > 2:
          inputs[i] = np.array(line.split(',')[1:]).astype(float)  # np.float was removed in NumPy 1.24
          nameInputs.append(line.split(',')[0].split('/')[6])
          i = i + 1

  prototipi = classPrototype(inputs,nameInputs)

  #get the 20x30 SOM or train a new one (if the folder does not contain the model)
  som = SOM(20, 30, lenExample, checkpoint_dir= './VisualModel10classes/', n_iterations=20,sigma=4.0)

  loaded = som.restore_trained()
  if not loaded:
    som.train(inputs)

  for k in range(len(nameInputs)):
    nameInputs[k] = nameInputs[k].split('_')[0]

  #shows the SOM
  showSom(som,inputs,nameInputs,1,'Visual map')
Code example #21
File: TestUnit.py Project: mghmgh1281375/POCS
            clf = cluster.fit(self.X, 40000)
            for i, x in enumerate(self.X_test):
                predicted = clf.predict(x)
                print('Actual: {}, Predicted: {}'.format(self.Y_test[i], predicted))
                # if predicted:
                #     if predicted == self.Y_test[i]:
                #         success += 1
            del cluster


if __name__ == "__main__":
    logging.info('Test started ...')
    # print(metrics.classification_report(expected, predicted))
    # confusion_matrix = metrics.confusion_matrix(expected, predicted)
    # print(confusion_matrix)
    # data_generator = generator

    # Tester(data_generator, [NearestNeighbor(k=1), NearestNeighbor(k=2), Parzen(r=0.1), Bayesian()]).run()

    from pickle import dump, load
    from SOM import SOM
    clusters = [SOM(10, (6, 6))]
    intensity = Intensity()
    train_X = load(open('MLP/resources/train-100d-X.pickle', 'rb'))
    train_Y = load(open('MLP/resources/train-100d-Y.pickle', 'rb'))
    test_X = load(open('MLP/resources/test-100d-X.pickle', 'rb'))
    test_Y = load(open('MLP/resources/test-100d-Y.pickle', 'rb'))

    Tester(train_X, train_Y, test_X, test_Y, clusters=clusters).run()

Code example #22
File: ClusterFind.py Project: LasTAD/VAST-2017-MC-1
import numpy as np
from SOM import SOM

data = np.loadtxt('Data/output.txt', delimiter=';', usecols=range(40))

###SOM
som = SOM(10, 10)  # initialize the SOM
som.fit(data, 10000, decay='hill')

# som = SOM(10, 10)  # initialize the SOM
# som.load('Data/SOM')

targets = np.loadtxt('Data/target.txt', dtype='int')

targets = targets - 1

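# The class labels below are Russian vehicle types from VAST 2017 MC1:
# car, 2-axle truck, 3-axle truck, truck with 4+ axles, 2-axle bus,
# 3-axle bus, and ranger truck.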
names = [
    'Автомобиль', 'Грузовик 2', 'Грузовик 3', 'Грузовик 4+', 'Автобус 2',
    'Автобус 3', 'Грузовик рейнджеров'
]
# now visualize the learned representation with the class labels
som.plot_point_map(data, targets, names, filename='images/SOM/som.png')

# for name in names:
#     som.plot_class_density(data, targets, t=names.index(name), name=name, filename='images/SOM/density ' + name + '.png')

# som.save('SOM')
Code example #23
File: test_rednxs.py Project: ornl-ndav/DOM

import DST
from SOM import SOM, SO, Sample, Instrument
from nessi_list import NessiList

SOM1 = SOM()
SOM1.setDataSetType("histogram")
SOM1.setYLabel("Intensity")
SOM1.setYUnits("counts A / meV")
SOM1.setAllAxisLabels(["momentum transfer", "energy transfer"])
SOM1.setAllAxisUnits(["1/A", "meV"])
SOM1.attr_list["data-title"] = "Test S(Q,E)"
SOM1.attr_list["data-run_number"] = "1344"

DSample = Sample()
DSample.name = "Test Sample"
DSample.nature = "CoCo"
SOM1.attr_list.sample = DSample

x = NessiList()
y = NessiList()
Code example #24
next(reader)  # skip the CSV header row
i = 0
audio_features = []
track_names = []
for row in reader:
    # Columns 7-16 hold the ten audio features; column 6 holds the track name.
    audio_features.append([row[j] for j in range(7, 17)])
    track_names.append([row[j] for j in range(6, 7)])
    i += 1
    if i == num_tracks:
        break

# Training inputs for tracks
audio_features = np.asarray(audio_features, dtype=float)

# Train a 20x30 SOM (10-dimensional inputs) for 50 iterations
som = SOM(20, 30, 10, 50)
som.train(audio_features)

# Get output grid
image_grid = som.get_centroids()

# Map tracks to their closest neurons
mapped = som.map_vects(audio_features)

# Plot
plt.imshow(image_grid[10])
plt.title('Track SOM')
plt.ylim([0, 20])
plt.xlim([0, 30])
for i, m in enumerate(mapped):
    plt.text(m[1],
Code example #25
def prepare():
    """
    prepares and returns all data structures to be trained.
    """

    # initialize parameters.
    n_iter = 20  # number of iterations performed per episode.
    n_episodes = 700  # number of all episodes. 20 * 700 = 14000
    n_categories = 4
    max_words = 4
    alpha1 = 0.3  # SOM learning rate.
    alpha2 = 0.2  # Hebbian links learning rate.
    alpha3 = 0.1  # Bootstrapping tables learning rate.
    input_threshold = 0.9  # Input threshold used to determine if word was "said".

    # initialize SOMs
    # c is empirically determined based on number of possible words for each SOM.
    position_som = SOM(16,
                       16,
                       3,
                       c=10.0,
                       alpha=alpha1,
                       n_iterations=n_iter,
                       n_episodes=n_episodes)
    size_som = SOM(16,
                   16,
                   1,
                   c=16.0,
                   alpha=alpha1,
                   n_iterations=n_iter,
                   n_episodes=n_episodes)
    color_som = SOM(16,
                    16,
                    3,
                    c=10.0,
                    alpha=alpha1,
                    n_iterations=n_iter,
                    n_episodes=n_episodes)
    shape_som = SOM(16,
                    16,
                    4,
                    c=5.0,
                    alpha=alpha1,
                    n_iterations=n_iter,
                    n_episodes=n_episodes)
    soms = [position_som, size_som, color_som, shape_som]

    # initialize WordVector a.k.a data generator.
    word_vector = WordVector()

    # initialize Hebbian links to/from each SOM.
    hebb1 = Hebbian(word_vector,
                    position_som,
                    alpha=alpha2,
                    n_iterations=n_iter,
                    n_episodes=n_episodes)
    hebb2 = Hebbian(word_vector,
                    size_som,
                    alpha=alpha2,
                    n_iterations=n_iter,
                    n_episodes=n_episodes)
    hebb3 = Hebbian(word_vector,
                    color_som,
                    alpha=alpha2,
                    n_iterations=n_iter,
                    n_episodes=n_episodes)
    hebb4 = Hebbian(word_vector,
                    shape_som,
                    alpha=alpha2,
                    n_iterations=n_iter,
                    n_episodes=n_episodes)
    hebbs = [hebb1, hebb2, hebb3, hebb4]

    # initialize bootstrapping tables.
    word_x_category = WordXCategory(word_vector,
                                    position_som.m * position_som.n,
                                    n_categories,
                                    alpha=alpha3)
    length_x_position = LengthXPosition(word_vector,
                                        word_x_category,
                                        n_categories,
                                        max_words,
                                        alpha=alpha3)

    # initialize Tensorflow session.
    sess = tf.Session()

    init_op = tf.global_variables_initializer()
    sess.run(init_op)

    # set correct session to all previously generated objects.
    set_sessions([soms, hebbs, word_x_category, length_x_position], sess)
    return word_vector, soms, hebbs, word_x_category, length_x_position, n_iter, n_episodes, input_threshold
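
Note: this example uses the TensorFlow 1.x graph/session API (tf.Session, tf.global_variables_initializer). Under TensorFlow 2.x it would need the compatibility layer, e.g.:

import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()  # restore TF1-style graphs and sessions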
Code example #26
File: test_gsas.py Project: ornl-ndav/DOM

import DST
from SOM import SOM
from SOM import SO

filename_SOM1 = "stuff1.dat"

SOM1 = SOM()
SOM1.attr_list["filename"] = filename_SOM1
SOM1.setTitle("This is a test")

for i in range(2):
    SO1 = SO()
    for j in range(10):
        SO1.id = i
        SO1.axis[0].val.append(j+1)
        SO1.y.append(1000+j+(20*j))
        SO1.var_y.append(100+j)
    SO1.axis[0].val.append(11)

    SOM1.append(SO1)

file = open(filename_SOM1, "w")
Code example #27
import numpy as np
import matplotlib.pyplot as plt

#Training inputs for RGBcolors
colors = np.array([[0., 0., 0.], [0., 0., 1.], [0., 0., 0.5],
                   [0.125, 0.529, 1.0], [0.33, 0.4, 0.67], [0.6, 0.5, 1.0],
                   [0., 1., 0.], [1., 0., 0.], [0., 1., 1.], [1., 0., 1.],
                   [1., 1., 0.], [1., 1., 1.], [.33, .33, .33], [.5, .5, .5],
                   [.66, .66, .66]])
colorNames = [
    'black', 'blue', 'darkblue', 'skyblue', 'greyblue', 'lilac', 'green',
    'red', 'cyan', 'violet', 'yellow', 'white', 'darkgrey', 'mediumgrey',
    'lightgrey'
]

#Train a 20x30 SOM with 400 iterations
som = SOM(m=20, n=30, dim=3, epochs=400)
som.train(colors)

#Get output grid
imageGrid = som.get_centroids()

#Map colours to their closest neurons
mapped = som.map_vects(colors)

#Plot
plt.imshow(imageGrid)
plt.title('Color SOM')
for i, m in enumerate(mapped):
    plt.text(m[1],
             m[0],
             colorNames[i],
Code example #28
File: test_dave2d.py Project: ornl-ndav/DOM

import DST
from SOM import SOM
from SOM import SO
from time import localtime, strftime, time

filename_SOM1 = "stuff1.dat"

SOM1 = SOM()
SOM1.attr_list["filename"] = filename_SOM1
SOM1.attr_list["epoch"] = time()
SOM1.attr_list["timestamp"] = DST.make_ISO8601(SOM1.attr_list["epoch"])
SOM1.attr_list["username"] = "******"
SOM1.setAllAxisLabels(["Q", "E"])
SOM1.setAllAxisUnits(["A-1", "meV"])
SOM1.setYLabel("Intensity")
SOM1.setYUnits("Counts/(meV A-1))")

SO1 = SO(2)
SO1.id = 0
SO1.axis[0].val.extend(range(5))
SO1.axis[1].val.extend(range(10))

y_len = (len(SO1.axis[0].val)-1) * (len(SO1.axis[1].val)-1)
Code example #29
File: run_SOM.py Project: ZamyslovSE/neural_network
varCount = 2
gridCount = 80
learningRate = 0.1
searchRadius = 0.4
tau1 = 1000
tau2 = 1000
pointCount = 1500

#INIT NEURON MATRIX
ar = numpy.linspace(0.0, 1.0, gridCount)
neurons = []
for i in range(gridCount):
    for j in range(gridCount):
        neurons.append(Neuron([ar[i], ar[j]], [i, j]))

som = SOM(neurons, varCount, learningRate)

def randomColor(classCount):  # Pick a random display color for each class
    vals = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F']
    colorArr = []
    for j in range(classCount):  
        color = '#'
        for i in range(6):
            color += vals[random.randint(0, 15)]
        colorArr.append(color)
    return colorArr

def flattenArray(points):
    flatArray = []
    for cl in points:
        flatArray += cl
Code example #30
    y_train = y_train.reshape((X_train.shape[0], 1))
    y_test = y_test.reshape((X_test.shape[0], 1))

    ############################
    # SOM
    ############################
    # select some samples as random weights for initialization
    initial_sample_indexes = np.random.permutation(X_train.shape[0])

    shape_net = (1, 3, 1)
    number_of_neurons = shape_net[0] * shape_net[1] * shape_net[2]

    som = SOM(shape=shape_net,
              number_of_feature=X_train.shape[1],
              distance_measure_str='cosine',
              topology='cubic',
              init_learning_rate=0.4,
              max_epoch=300,
              samples_for_init=X_train[initial_sample_indexes[0:number_of_neurons]])

    # Train SOM
    som.fit(X=X_train)

    y_train_pre = som.predict(X=X_train)
    y_test_pre = som.predict(X=X_test)

    print("###############################\n###########  SOM  #############\n###############################")
    print('PU-Train-SOM: ', purity_measure(clusters=y_train_pre, classes=y_train))
    print('PU-Test-SOM: ', purity_measure(clusters=y_test_pre, classes=y_test))

    print('RI-Train-SOM: ', rand_index(clusters=y_train_pre, classes=y_train))
Code example #31
parser = optparse.OptionParser(usage="usage: %prog [options]")

parser.add_option("", "--with-xvar", action="store_true", dest="withXvar",
                  help="Create SOs with variances on x axis")
parser.set_defaults(withXvar=False)

parser.add_option("", "--extra-som", action="store_true", dest="extraSom",
                  help="Create another SOM for an extra column")
parser.set_defaults(extraSom=False)

(options, args) = parser.parse_args()

filename_SOM1 = "stuff1.dat"

SOM1 = SOM()
SOM1.attr_list["filename"] = filename_SOM1
SOM1.attr_list["username"] = "******"
SOM1.setAllAxisLabels(["TOF"])
SOM1.setAllAxisUnits(["microseconds"])
SOM1.setYLabel("Counts")
SOM1.setYUnits("counts")
SOM1.setDataSetType("histogram")

if options.extraSom:
    SOM2 = SOM()
    SOM2.copyAttributes(SOM1)
    SOM2.setAllAxisLabels(["Wavelength"])
    SOM2.setAllAxisUnits(["Angstroms"])
    SOM2.setYLabel("Counts")
    SOM2.setYUnits("counts")
Code example #32
File: test_cansas1d.py Project: ornl-ndav/DOM

import DST
from SOM import SOM, SO, Sample, Instrument

SOM1 = SOM()
SOM1.setDataSetType("histogram")
SOM1.setYLabel("Intensity")
SOM1.setYUnits("counts A")
SOM1.setAllAxisLabels(["Q"])
SOM1.setAllAxisUnits(["1/A"])
SOM1.attr_list["data-title"] = "Test File"
SOM1.attr_list["data-run_number"] = "1344"

DSample = Sample()
DSample.name = "Test Sample"
DSample.nature = "K3NO+"
SOM1.attr_list.sample = DSample

DInst = Instrument(instrument="SANS", primary=(15.0,0.0),
                   det_secondary=(2.0,0.0),
Code example #33
File: test_mwd.py Project: ornl-ndav/DOM

import DST
from SOM import SOM
import hlr_utils

filename = "test.xml"

# Use a lowercase instance name so the SOM class is not shadowed.
som = SOM()
conf = hlr_utils.Configure()
conf.verbose = True
som.attr_list["config"] = conf

ofile = open(filename, "w")

mdw = DST.MdwDST(ofile)
mdw.writeSOM(som)
mdw.release_resource()
Code example #34
def main():
    # setup
    env = gym.make('internet.SlitherIO-v0')
    env.configure(remotes=1, fps=5)
    observation_n = env.reset()

    tiny_image_h = 20
    tiny_image_w = 30  # for tiny image
    SOM_WIDTH = 9
    SOM_HEIGHT = 9
    buffer_threshold = 10
    som = SOM.SOM(SOM_WIDTH,
                  SOM_HEIGHT,
                  tiny_image_w,
                  tiny_image_h,
                  learning_rate=1,
                  decay_rate=.95,
                  radius=3)
    time_to_switch_to_live = 60 * 5
    live_bool = False

    ## Define Actions on keyboard
    left = [universe.spaces.PointerEvent(30, 240, 0)]
    right = [universe.spaces.PointerEvent(515, 240, 0)]
    up = [universe.spaces.PointerEvent(275, 95, 0)]
    down = [universe.spaces.PointerEvent(275, 380, 0)]
    boost_left = [universe.spaces.PointerEvent(30, 240, 1)]
    boost_right = [universe.spaces.PointerEvent(515, 240, 1)]
    boost_up = [universe.spaces.PointerEvent(275, 95, 1)]
    boost_down = [universe.spaces.PointerEvent(275, 380, 1)]

    # left_boost = [('KeyEvent', 'ArrowUp', True), ('KeyEvent', 'ArrowLeft', True), ('KeyEvent', 'ArrowRight', False)]
    # left = [('KeyEvent', 'ArrowUp', False), ('KeyEvent', 'ArrowLeft', True), ('KeyEvent', 'ArrowRight', False)]
    # right = [('KeyEvent', 'ArrowUp', False), ('KeyEvent', 'ArrowLeft', False), ('KeyEvent', 'ArrowRight', True)]
    # right_boost = [('KeyEvent', 'ArrowUp', True), ('KeyEvent', 'ArrowLeft', False), ('KeyEvent', 'ArrowRight', True)]
    # forward = [('KeyEvent', 'ArrowUp', True), ('KeyEvent', 'ArrowLeft', False), ('KeyEvent', 'ArrowRight', False)]
    # still = [('KeyEvent', 'ArrowUp', False), ('KeyEvent', 'ArrowLeft', False), ('KeyEvent', 'ArrowRight', False)]
    # possible_actions.append(still)
    # possible_actions.append(forward)
    possible_actions = [
        left, right, up, down, boost_down, boost_right, boost_up, boost_left
    ]
    bad_actions = [boost_right, boost_left, boost_up, boost_left]
    # possible_actions.append(right_boost)
    # possible_actions.append(left_boost)
    # bad_actions = [left_boost, right_boost, forward]

    q_learner = QLearner(SOM_WIDTH, SOM_HEIGHT, possible_actions)

    frame_buffer = queue.Queue(buffer_threshold * 2)
    action_buffer = queue.Queue(buffer_threshold * 2)
    rewards_buffer = queue.Queue(buffer_threshold * 2)
    list_of_buffers = [frame_buffer, action_buffer, rewards_buffer]

    trials = []
    reward_total = 0.0

    action = up
    state_w = 0
    prev_state_w = None
    prev_state_h = None
    last_image = None
    start_time = time.time()
    while True:

        if observation_n[0] is not None:
            if time.time() - start_time > time_to_switch_to_live and not live_bool:
                live_bool = True
                shorten_buffers_to_one(list_of_buffers)
                buffer_threshold = 1

            if info['n'][0]["env_status.env_state"] == "running" and reward_n[0] is not None:
                frame_buffer.put_nowait(crop(observation_n[0]["vision"]))
                action_buffer.put_nowait(action)
                rewards_buffer.put_nowait(reward_n[0])
                reward_total += reward_n[0]
                if frame_buffer.qsize() > buffer_threshold:
                    current_frame = frame_buffer.get_nowait()
                    current_action = action_buffer.get_nowait()
                    current_reward = rewards_buffer.get_nowait()
                    action_n = [action for ob in observation_n]
                    last_image = current_frame
                    # add here
                    image_for_som, snake_dist = process_image(
                        last_image, tiny_image_w, tiny_image_h)
                    state_w, state_h = som.train(image_for_som)
                    if prev_state_w is None:
                        prev_state_w = state_w
                        prev_state_h = state_h
                        action = q_learner.select_action(state_w, state_h)
                    else:
                        # add here
                        # Base reward plus env reward, minus a penalty that grows as snake_dist shrinks.
                        default_reward = 5 + reward_n[0] - (50.0 / (snake_dist + 1.0))
                        '''if current_action in bad_actions:
                            if current_reward < 0:
                                default_reward = current_reward
                            else:
                                default_reward = 0'''
                        q_learner.update_qtable(current_action, prev_state_w,
                                                prev_state_h, state_w, state_h,
                                                default_reward)
                        action = q_learner.select_action(state_w, state_h)
                        prev_state_w = state_w
                        prev_state_h = state_h
                    cv2.imshow("Frame Training",
                               cv2.cvtColor(last_image, cv2.COLOR_RGB2BGR))
            else:
                # do stuff with death here
                if reward_total > 0.0:
                    trials.append(reward_total)
                    print(trials)
                if frame_buffer.qsize() > 0:
                    current_frame = frame_buffer.get_nowait()
                    current_action = action_buffer.get_nowait()
                    current_reward = rewards_buffer.get_nowait()
                    last_image = current_frame
                    # add here
                    image_for_som, snake_dist = process_image(
                        last_image, tiny_image_w, tiny_image_h)
                    state_w, state_h = som.train(image_for_som)
                    # The following assumes that prev_state has been defined
                    q_learner.update_qtable(current_action, prev_state_w,
                                            prev_state_h, state_w, state_h,
                                            -500)
                    action = q_learner.select_action(state_w, state_h)
                    prev_state_w = state_w
                    prev_state_h = state_h
                    cv2.imshow("Frame Training",
                               cv2.cvtColor(last_image, cv2.COLOR_RGB2BGR))
        else:
            action_n = [up for ob in observation_n]
        observation_n, reward_n, done_n, info = env.step(action_n)
        # print(done_n)

        print_som(som)
        env.render()
Code example #35
def SOM_clustering():
    arr = np.array([])

    with open("AllTrajecsNopePoints.data") as f:
        for line in f:
            strings = line.split(" ")
            arrcol = np.array([])
            for str in strings:
                arrcol = np.concatenate((arrcol, np.array([float(str)])),
                                        axis=0)
            if len(arr) == 0:
                arr = np.concatenate((arr, arrcol), axis=0)
            else:
                arr = np.vstack((arr, arrcol))
            #print (arr)

    testX = arr

    print("Cluster Start")
    #Train a 20x30 SOM with 400 iterations
    som = SOM(20, 30, 720, 400)
    som.train(testX)

    print("get_centroids")
    #Get output grid
    image_grid = som.get_centroids()
    #Write to file
    target = open("ClusteredCentroids.csv", 'w')
    count = 0
    for i, m in enumerate(image_grid):
        for column in m:
            for k, dim in enumerate(column):
                if (k != 0):
                    target.write(" ")
                target.write("%s" % (dim))
            target.write("\n")

    #        target.write("%s " % (n))
    #    target.write("\n")
    target.close()
    #for i, m in enumerate(image_grid):
    #    print("i m[1] m[0] =", i ,m[1], m[0])

    #Map colours to their closest neurons
    mapped = som.map_vects(testX)

    print("Plot")
    array = np.zeros((20, 30))
    #Plot
    #plt.imshow(image_grid)
    #plt.title('Color SOM')
    #plt.plot(20, 30, 'b.')
    target = open("ClusteredCabs_labels.csv", 'w')
    for i, m in enumerate(mapped):
        index = ((m[0]) * 30) + (m[1] + 1)
        target.write("%s\n" % (index))
        print(index)
        array[m[0], m[1]] += 1
    target.close()
    #print("i m[1] m[0] =", i ,m[1], m[0])
    #plt.plot(m[1], m[0], 'ro')
    #plt.text(m[1], m[0], index, ha='center', va='center',
    #        bbox=dict(facecolor='white', alpha=0.5, lw=0))
    #plt.show()

    array = array.astype(int)
    array = np.reshape(array, 600)
    print(array)
    #print (testX)

    ###Write to file
    ##target = open("ClusteredCabs.clu",'w')
    ##for i, m in enumerate(array):
    ##    #if m != 0 :
    ##        target.write("%s %s" % (i, m))
    ##        target.write("\n")
    ##target.close()
    return
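
The written labels map grid cell (m[0], m[1]) to a 1-based row-major index in 1..600 (20 rows of 30 columns), matching the reshaped 600-element count array printed at the end.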
Code example #36
File: SOMClustering.py Project: wenbo/MLBook
# -*- coding: utf-8 -*-

__author__ = "alexander"
__date__ = "$05.06.2011 21:30:11$"

from ImageContainer import ImageContainer
from ImageContainer import ImagePattern

from SOM import SOM

if __name__ == "__main__":
    imgContainer = ImageContainer()
    imgContainer.fromDirectory("/Users/alexander/Pictures/qwe/")
    #imgContainer.analyzeAll()
    som = SOM(3, 5, 5, None)
    som.clustering(imgContainer.images)

    for image in imgContainer.images:
        print(som.coordinates(image))