Example no. 1
    def test_gmm_deterministic(self):
        from pyspark.mllib.clustering import GaussianMixture

        x = range(0, 100, 10)
        y = range(0, 100, 10)
        data = self.sc.parallelize([[a, b] for a, b in zip(x, y)])
        clusters1 = GaussianMixture.train(data, 5, convergenceTol=0.001, maxIterations=100, seed=63)
        clusters2 = GaussianMixture.train(data, 5, convergenceTol=0.001, maxIterations=100, seed=63)
        for c1, c2 in zip(clusters1.weights, clusters2.weights):
            self.assertEqual(round(c1, 7), round(c2, 7))
Example no. 2
    def test_gmm_with_initial_model(self):
        from pyspark.mllib.clustering import GaussianMixture
        data = self.sc.parallelize([
            (-10, -5), (-9, -4), (10, 5), (9, 4)
        ])

        gmm1 = GaussianMixture.train(data, 2, convergenceTol=0.001,
                                     maxIterations=10, seed=63)
        gmm2 = GaussianMixture.train(data, 2, convergenceTol=0.001,
                                     maxIterations=10, seed=63, initialModel=gmm1)
        self.assertAlmostEqual((gmm1.weights - gmm2.weights).sum(), 0.0)
Example no. 3
def gmm_spark(sc, X=None, clusters=3):
	if X is None:
		X = users_as_parallelizable_sparse_data(users)
	X = sc.parallelize(X)
	gmm = GaussianMixture.train(X, k=clusters)
	# print the fitted parameters of every component, not just the first two
	for i in range(clusters):
		print("weight = ", gmm.weights[i], "mu = ", gmm.gaussians[i].mu, "sigma = ", gmm.gaussians[i].sigma.toArray())
	return gmm
Example no. 4
    def test_gmm(self):
        from pyspark.mllib.clustering import GaussianMixture

        data = self.sc.parallelize([[1, 2], [8, 9], [-4, -3], [-6, -7]])
        clusters = GaussianMixture.train(data, 2, convergenceTol=0.001, maxIterations=100, seed=56)
        labels = clusters.predict(data).collect()
        self.assertEqual(labels[0], labels[1])
        self.assertEqual(labels[2], labels[3])
Example no. 5
                        default=1e-3,
                        type=float,
                        help='convergence threshold')
    parser.add_argument('--maxIterations',
                        default=100,
                        type=int,
                        help='Number of iterations')
    parser.add_argument('--seed',
                        default=random.getrandbits(19),
                        type=long,
                        help='Random seed')
    args = parser.parse_args()

    conf = SparkConf().setAppName("GMM")
    sc = SparkContext(conf=conf)

    lines = sc.textFile(args.inputFile)
    data = lines.map(parseVector)
    model = GaussianMixture.train(data, args.k, args.convergenceTol,
                                  args.maxIterations, args.seed)
    for i in range(args.k):
        print(("weight = ", model.weights[i], "mu = ", model.gaussians[i].mu,
               "sigma = ", model.gaussians[i].sigma.toArray()))
    print("\n")
    print((
        "The membership value of each vector to all mixture components (first 100): ",
        model.predictSoft(data).take(100)))
    print("\n")
    print(("Cluster labels (first 100): ", model.predict(data).take(100)))
    sc.stop()
Example no. 6
    :param convergenceTol:   Convergence threshold. Defaults to 1e-3
    :param maxIterations:    Number of EM iterations to perform. Defaults to 100
    :param seed:             Random seed
    """

    parser = argparse.ArgumentParser()
    parser.add_argument('inputFile', help='Input File')
    parser.add_argument('k', type=int, help='Number of clusters')
    parser.add_argument('--convergenceTol', default=1e-3, type=float, help='convergence threshold')
    parser.add_argument('--maxIterations', default=100, type=int, help='Number of iterations')
    parser.add_argument('--seed', default=random.getrandbits(19),
                        type=long, help='Random seed')
    args = parser.parse_args()

    conf = SparkConf().setAppName("GMM")
    sc = SparkContext(conf=conf)

    lines = sc.textFile(args.inputFile)
    data = lines.map(parseVector)
    model = GaussianMixture.train(data, args.k, args.convergenceTol,
                                  args.maxIterations, args.seed)
    for i in range(args.k):
        print(("weight = ", model.weights[i], "mu = ", model.gaussians[i].mu,
               "sigma = ", model.gaussians[i].sigma.toArray()))
    print("\n")
    print(("The membership value of each vector to all mixture components (first 100): ",
           model.predictSoft(data).take(100)))
    print("\n")
    print(("Cluster labels (first 100): ", model.predict(data).take(100)))
    sc.stop()
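Both copies of this script pass lines.map(parseVector) to the trainer, but the parseVector helper itself falls outside the captured window. A minimal sketch of what it presumably looks like, given the space-separated input format the other examples use (the exact original definition is not shown here):

import numpy as np

def parseVector(line):
    # One observation per line, coordinates separated by spaces.
    return np.array([float(x) for x in line.split(' ')])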
Example no. 7
df = pd.DataFrame(l, index = ['gp1_P', 'gp2_P', 'gp3_P', 'gp4_P', 'gp5_P', 'gp6_P'],
                  columns = ['gp1_R', 'gp2_R', 'gp3_R', 'gp4_R', 'gp5_R', 'gp6_R'])
df


# ### Interpretation (to be finished)
With KMeans, 2 groups stand out: 4 and 6
The group gp1_P gathers 123 of the individuals and clearly mixes gp1_R / gp2_R / gp3_R
# ## Gaussian Mixture 

# In[12]:

from pyspark.mllib.clustering import GaussianMixture

# Build the model with the same dataTrain as KMeans
gmm = GaussianMixture.train(dataTrain, 6)

# print the fitted parameters of the model (one line per component)
for i in range(6):
    print("weight = ", gmm.weights[i], "mu = ", gmm.gaussians[i].mu,
          "sigma = ", gmm.gaussians[i].sigma.toArray())


# ### Interpretation (to be finished)

# # Evaluation metrics (in progress)

# In[30]:

from numpy import array
from pyspark.mllib.evaluation import MultilabelMetrics
# $example off$

from pyspark import SparkContext
# $example on$
from pyspark.mllib.clustering import GaussianMixture, GaussianMixtureModel
# $example off$

if __name__ == "__main__":
    sc = SparkContext(appName="GaussianMixtureExample")  # SparkContext

    # $example on$
    # Load and parse the data
    data = sc.textFile("data/mllib/gmm_data.txt")
    parsedData = data.map(lambda line: array([float(x) for x in line.strip().split(' ')]))

    # Build the model (cluster the data)
    gmm = GaussianMixture.train(parsedData, 2)

    # Save and load model
    gmm.save(sc, "target/org/apache/spark/PythonGaussianMixtureExample/GaussianMixtureModel")
    sameModel = GaussianMixtureModel\
        .load(sc, "target/org/apache/spark/PythonGaussianMixtureExample/GaussianMixtureModel")

    # output parameters of model
    for i in range(2):
        print("weight = ", gmm.weights[i], "mu = ", gmm.gaussians[i].mu,
              "sigma = ", gmm.gaussians[i].sigma.toArray())
    # $example off$

    sc.stop()
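The reloaded sameModel is not exercised further in the window above. As a small hedged check (reusing the snippet's own variables, and run before sc.stop()), its cluster assignments can be compared with those of the original model:

    # Assignments from the saved-then-reloaded model should match the original model.
    original_labels = gmm.predict(parsedData).collect()
    reloaded_labels = sameModel.predict(parsedData).collect()
    print("models agree:", original_labels == reloaded_labels)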
Example no. 9
# -*- coding:utf-8 -*-
""""
Program: GMM
Description: 调用spark内置的GMM算法示例
Author: zhenglei - [email protected]
Date: 2016-01-14 13:38:58
Last modified: 2016-01-14 13:50:11
Python release: 2.7
"""
# 调用spark内部的kmeans算法实现完成机器学习实战中的第十章示例
from numpy import array
from pyspark import SparkContext
from pyspark.mllib.clustering import GaussianMixture


if __name__ == '__main__':
    sc = SparkContext()
    datas = sc.textFile('testSet.txt')
    clusters_num = 4
    parseData = datas.map(lambda x: array([float(y) for y in x.split('\t')]))
    model = GaussianMixture.train(parseData, clusters_num, maxIterations=10)
    clusters = [[] for i in range(clusters_num)]
    labels = model.predict(parseData).collect()
    points = parseData.collect()  # collect once instead of re-collecting inside the loop
    nums = len(labels)
    for i in xrange(nums):
        clusters[labels[i]].append(points[i])
    print clusters
    sc.stop()
Example no. 10
#    print data1.take(5)
# Without converting the features into dense vectors, transformation with zero mean will raise
# exception on sparse vector.
# data2 will be unit variance and zero mean.
    data2 = label.zip(scaler1.transform(features.map(lambda x: Vectors.dense(x.toArray()))))
    parsedData = data2.map (lambda x: x[1])
    parsedData.cache()
    modelList = []
    d = dict()

    noClusters = 5
    convergenceTol = 1e-3
    maxIterations = 1000
    seed = random.getrandbits(19)
# Build the model (cluster the data)
    gmm = GaussianMixture.train(parsedData, noClusters, convergenceTol,
                                  maxIterations, seed)
# output parameters of model
    for i in range(noClusters):
        print("weight = ", gmm.weights[i], "mu = ", gmm.gaussians[i].mu,
              "sigma = ", gmm.gaussians[i].sigma.toArray())
    """
    for clusterSize in range(2, 21, 2):
    # Build the model (cluster the data)
        clusters = KMeans.train(parsedData, clusterSize, maxIterations=10,runs=10, initializationMode="random")
        modelList.append(clusters)

    # Evaluate clustering by computing Within Set Sum of Squared Errors
        def error(point):
            center = clusters.centers[clusters.predict(point)]
            return sqrt(sum([x**2 for x in (point - center)]))
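The scaler1 applied above is created before the captured window begins. Under the usual MLlib pattern implied by the surrounding comments, it would be a StandardScaler fitted with zero mean and unit variance, roughly as follows (a sketch under that assumption, not the original code):

from pyspark.mllib.feature import StandardScaler

# withMean=True only works on dense vectors, which is why the features are
# converted with Vectors.dense(...) before being transformed above.
scaler1 = StandardScaler(withMean=True, withStd=True).fit(features)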
Example no. 11
    ### Local default options
    k = 2  # "k" (int) Set the number of Gaussians in the mixture model.  Default: 2
    convergenceTol = 0.001  # "convergenceTol" (double) Set the largest change in log-likelihood at which convergence is considered to have occurred.
    maxIterations = 150  # "maxIterations" (int) Set the maximum number of iterations to run. Default: 100
    seed = None  # "seed" (long) Set the random seed

    # Load and parse the data
    data = sc.textFile("/var/mdp-cloud/gmm_data.txt")
    parsedData = data.map(
        lambda line: array([float(x) for x in line.strip().split(' ')]))
    # filteredData = data.filter(lambda arr: int(arr[1]) != 0)

    # Build and save the model (cluster the data)
    gmm = GaussianMixture.train(parsedData,
                                k,
                                convergenceTol=convergenceTol,
                                maxIterations=maxIterations,
                                seed=seed)
    # gmm.save(sc, "target/org/apache/spark/PythonGaussianMixtureExample/GaussianMixtureModel")
    # gmm.save(sc, "GaussianMixtureModel_CV")
    # The following line would load the model
    # sameModel = GaussianMixtureModel.load(sc, "target/org/apache/spark/PythonGaussianMixtureExample/GaussianMixtureModel")

    # output parameters of model
    for i in range(k):
        print("weight = ", gmm.weights[i], "mu = ", gmm.gaussians[i].mu,
              "sigma = ", gmm.gaussians[i].sigma.toArray())

    sc.stop()
            "   opp_score " \
            "FROM team_avgs"
    query = "SELECT " \
            "   team_id, " \
            "   team_name, " \
            "   AVG(t1_rush), " \
            "   AVG(t1_pass), " \
            "   AVG(t2_rush), " \
            "   AVG(t2_pass) " \
            "FROM full_game_stats " \
            "JOIN team ON 1=1 " \
            "   AND full_game_stats.t1_id = team.team_id " \
            "GROUP BY team_id, team_name"
    curs.execute(query)
    sql_dat = curs.fetchall()
    team_ids = [row[0] for row in sql_dat]
    team_names = [row[1] for row in sql_dat]
    features = [row[2:] for row in sql_dat]

    data = sc.parallelize(features, 1)
    model = GaussianMixture.train(data, k=10)
    cluster_labels = model.predict(data).collect()


    labels = zip(team_ids,team_names, cluster_labels)
    df = spark.createDataFrame( labels,
                        ["team_id", "team_name", "cluster_id"] )
    df.createOrReplaceTempView("model")
    for k in range(10):
        spark.sql("SELECT * FROM model WHERE cluster_id = {}".format(k)).show()
Example no. 13
                                maxIterations=100,
                                initialModel=KMeansModel(initial_centroids))
    end = time()
    elapsed_time = end - start
    kmeans_output = [
        "====================== KMeans ====================\n",
        "Final centers: " + str(kmeans_model.clusterCenters),
        "Total Cost: " + str(kmeans_model.computeCost(data)),
        "Value of K: " + str(k),
        "Elapsed time: %0.10f seconds." % elapsed_time
    ]
    #path = "hdfs://masterNode:9000/user/spark/MODELOS-marcelo/KMEANS-2"
    #kmeans_model.save(sc,path)
    # Gauss KMeans
    start = time()
    gauss_model = GaussianMixture.train(data, k, maxIterations=20)
    end = time()
    elapsed_time = end - start
    gauss_output = [
        "====================== Gauss KMeans ====================\n"
    ]
    for i in range(k):
        v1 = ("weight = ", gauss_model.weights[i])
        v2 = ("mu = ", gauss_model.gaussians[i].mu)
        v3 = ("sigma = ", gauss_model.gaussians[i].sigma.toArray())
        gauss_output.append((v1, v2, v3))
    tiempo = "Tiempo: " + str(elapsed_time)
    gauss_output.append(tiempo)

    kmeans_info = sc.parallelize(kmeans_output)
    gauss_info = sc.parallelize(gauss_output)
Example no. 15
from pyspark.mllib.clustering import GaussianMixture
from pyspark import SparkContext
from scipy.stats import mvn
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import time

DIR = "/home/adrianj/Desktop/MachineLearning/Resources/"
FILE_PATH = DIR+"atemporalTest.txt"
NUM_GAUSSIANS = 500

sc = SparkContext(appName="GMM Trainer")
data = sc.textFile(FILE_PATH)
parsedData = data.map(lambda line: np.array([float(x) for x in line.strip().split(' ')]))
gmm = GaussianMixture.train(parsedData, NUM_GAUSSIANS, seed=10)

print("Dumping to "+DIR+"GMMA/...")
#fig = plt.figure()
#ax = fig.gca(projection='3d')
# Record the model
gmm.save(sc, DIR+"GMMA/")
'''
for i in range(NUM_GAUSSIANS):
	
	mu = gmm.gaussians[i].mu
	sigma = (gmm.gaussians[i].sigma).toArray()
	weight = gmm.weights[i]
	#a, b = np.random.multivariate_normal(mu, sigma, 5000).T
	#surf = ax.scatter(a, b, c, zdir='z')
	#plt.plot(a, b, "x")
Example no. 16
import time

import numpy as np
from pyspark.mllib.clustering import GaussianMixture
from sklearn import mixture


def parse(data):
    # drop the first and last character of each field (e.g. surrounding quotes)
    # and convert the remainder to float
    values = []
    for i in range(len(data)):
        values.append(float(data[i][1:-1]))
    return values


parsedata = outdata.map(lambda line: line.encode('utf-8').split(",")).map(
    lambda l: parse(l))

start_time = time.time()
gmm = GaussianMixture.train(parsedata, 80)  # train() both builds and fits the model; no separate fit() call exists
print time.time() - start_time

#testing Gaussian mixture model for python
start_time = time.time()
#print sample1

gmix = mixture.GMM(n_components=90, covariance_type='full')  # pre-0.18 scikit-learn API; newer releases use mixture.GaussianMixture
gmix.fit(parsedata.collect())  # scikit-learn expects a local array, not an RDD
#gmix.predict(parsedInSample1)
end_time = time.time()
gmpython = end_time - start_time
print gmpython

Example no. 17
            elements = repo.get(pk_aids)
            for element in elements:
                for col_index, col in enumerate(cols):
                    if element.get(col) is not None:
                        rows[index].get(pk_aids)[col_index] = element.get(col)
                        print(element.get(col))
    for index, row in enumerate(rows):
        for pk_aids in row:
            if rows[index].get(pk_aids) is not None:
                if index == 0:
                    data = rows[index].get(pk_aids)
                else:
                    data = np.concatenate((data, rows[index].get(pk_aids)),
                                          axis=0)
    print(data)
    #Parameters:
    #data – RDD of data points
    #k – Number of components
    #convergenceTol – Threshold value to check the convergence criteria. Defaults to 1e-3
    #maxIterations – Number of iterations. Default to 100
    #seed – Random Seed
    #initialModel – GaussianMixtureModel for initializing learning
    # GaussianMixture.train/predict expect an RDD, while `data` here is a local
    # NumPy array, so parallelize it first (sc is assumed to be the active SparkContext).
    rdd = sc.parallelize(data)
    model = GaussianMixture.train(rdd,
                                  10,
                                  convergenceTol=0.0001,
                                  maxIterations=50)

    labels = model.predict(rdd).collect()

    print
Example no. 18
today = dt.datetime.today()
spark_df = sc.parallelize(
    spark.read.json("Data/yelp_academic_dataset_user.json").select(
        "review_count", "average_stars", "yelping_since").rdd.map(lambda x: (x[
            0], x[1], (today - par.parse(x[2])).days)).collect()[:1700])
scaler = MinMaxScaler(inputCol="_1",\
         outputCol="scaled_1")
# Getting the input data
trial_df = spark_df.map(lambda x: pyspark.ml.linalg.Vectors.dense(x)).map(
    lambda x: (x, )).toDF()
scalerModel = scaler.fit(trial_df)
vector_df = scalerModel.transform(trial_df).select("scaled_1").rdd.map(
    lambda x: Vectors.dense(x))

# Initialize GMM
gmm = GaussianMixture.train(vector_df, k=4, maxIterations=20, seed=2018)

df = pandas.DataFrame({'features': [], 'cluster': []})
i = 0
for v in vector_df.collect():
    df.loc[i] = [[float(v[0]), float(v[1]), float(v[2])], int(gmm.predict(v))]
    i += 1

print df

df_with = spark.createDataFrame(
    spark.createDataFrame(df).rdd.map(
        lambda x: (x[0][0], x[0][1], x[0][2], int(x[1])))).toPandas()
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
scatter = ax.scatter(df_with['_1'],
Example no. 19
    row_num = info_df.filter(info_df.high == 'IT').count()

    for index, repo in enumerate(repos):
        for pk_aids in repo:
            elements = repo.get(pk_aids)
            for element in elements:
                for col_index, col in enumerate(cols):
                    if element.get(col) is not None:
                        rows[index].get(pk_aids)[col_index]=element.get(col)
                        print(element.get(col))
    for index, row in enumerate(rows):
        for pk_aids in row:
            if rows[index].get(pk_aids) is not None:
                if index == 0:
                    data = rows[index].get(pk_aids)
                else:
                    data = np.concatenate((data, rows[index].get(pk_aids)), axis=0)
    print(data)
    #Parameters:
    #data – RDD of data points
    #k – Number of components
    #convergenceTol – Threshold value to check the convergence criteria. Defaults to 1e-3
    #maxIterations – Number of iterations. Default to 100
    #seed – Random Seed
    #initialModel – GaussianMixtureModel for initializing learning
    # GaussianMixture.train/predict expect an RDD, while `data` here is a local
    # NumPy array, so parallelize it first (sc is assumed to be the active SparkContext).
    rdd = sc.parallelize(data)
    model = GaussianMixture.train(rdd, 10, convergenceTol=0.0001, maxIterations=50)

    labels = model.predict(rdd).collect()

    print
Example no. 20
            gmm.gaussians[i].mu, gmm.gaussians[i].sigma.toArray()).pdf(x)
    # prob_x = gmm.predictSoft([x])
    # rs = np.prod(prob_x)
    return rs


if __name__ == "__main__":
    sc = SparkContext(appName="GaussianMixtureExample")  # SparkContext
    # $example on$
    # Load and parse the data
    data = sc.textFile(sys.argv[1])
    parsedData = data.map(lambda line: array(
        ([float(x) for x in line.strip().split(",")])[index]))

    # Build the model (cluster the data)
    gmm = GaussianMixture.train(parsedData, n_clusters)

    # Save and load model
    if (os.path.isdir('GMMResult')):
        shutil.rmtree('GMMResult')
    gmm.save(sc, "GMMResult")
    sameModel = GaussianMixtureModel.load(sc, "GMMResult")

    # output parameters of model
    for i in range(n_clusters):
        print("weight = ", gmm.weights[i], "mu = ", gmm.gaussians[i].mu,
              "sigma = ", gmm.gaussians[i].sigma.toArray())

    datfull = data.map(lambda line: array(
        ([float(x) for x in line.strip().split(",")])))
    dat = datfull.collect()  # equivalent to take(count()) without the extra counting pass
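The window above opens mid-function, so only the tail of the density computation is visible (a per-component pdf, plus a commented-out predictSoft alternative). A minimal sketch of what such a helper could look like, assuming SciPy is available and gmm is the trained GaussianMixtureModel (the helper name and signature are hypothetical):

from scipy.stats import multivariate_normal

def mixture_pdf(gmm, x):
    # Mixture density at x: sum over components of weight_i * N(x; mu_i, sigma_i).
    rs = 0.0
    for i in range(len(gmm.weights)):
        rs += gmm.weights[i] * multivariate_normal(
            gmm.gaussians[i].mu.toArray(),
            gmm.gaussians[i].sigma.toArray()).pdf(x)
    return rs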
Example no. 21
from numpy import array
from pyspark import SparkContext
from pyspark.mllib.clustering import GaussianMixture
import matplotlib.pyplot as plt
import numpy as np
#plt.figure()


sc=SparkContext()

data=sc.textFile("./coord.txt")
#test_plot=np.genfromtxt("./coord.txt",delimiter=',',dtype=float)
#plt.plot(test_plot[:,1],test_plot[:,0],'ro')
#plt.show()
parsedData=data.map(lambda line: array([float(x) for x in line.strip().split(',')]))
l=3
gmm = GaussianMixture.train(parsedData,l)
#x=np.zeros(90000)
#y=np.zeros(90000)

#for i in range(0,l):
	#print "w= ",gmm.weights[i]
	#print "sigma= ",gmm.gaussians[i].sigma.toArray()
	#print "mu= ",gmm.gaussians[i].mu
	
#x1=gmm.weights[0]*np.random.multivariate_normal(gmm.gaussians[0].mu,gmm.gaussians[0].sigma.toArray(),90000)
#x2=gmm.weights[1]*np.random.multivariate_normal(gmm.gaussians[1].mu,gmm.gaussians[1].sigma.toArray(),90000)		


out_file = open("./GMM.txt", 'w')
for j in range(0, l):
	out_file.write(str(gmm.weights[j]) + '\n')
Example no. 22
from numpy import array
from pyspark import SparkContext
from pyspark.mllib.clustering import GaussianMixture, GaussianMixtureModel

if __name__ == "__main__":
    sc = SparkContext(appName="GaussianMixtureExample")  # SparkContext

    ### Local default options
    k = 2  # "k" (int) Set the number of Gaussians in the mixture model.  Default: 2
    convergenceTol = 0.001  # "convergenceTol" (double) Set the largest change in log-likelihood at which convergence is considered to have occurred.
    maxIterations = 150  # "maxIterations" (int) Set the maximum number of iterations to run. Default: 100
    seed = None  # "seed" (long) Set the random seed

    # Load and parse the data    
    data = sc.textFile("/var/mdp-cloud/gmm_data.txt")
    parsedData = data.map(lambda line: array([float(x) for x in line.strip().split(' ')])) 
    # filteredData = data.filter(lambda arr: int(arr[1]) != 0)	

    # Build and save the model (cluster the data)
    gmm = GaussianMixture.train(parsedData, k, convergenceTol=convergenceTol, maxIterations=maxIterations, seed=seed)
    # gmm.save(sc, "target/org/apache/spark/PythonGaussianMixtureExample/GaussianMixtureModel")
    # gmm.save(sc, "GaussianMixtureModel_CV")
    # The following line would load the model
    # sameModel = GaussianMixtureModel.load(sc, "target/org/apache/spark/PythonGaussianMixtureExample/GaussianMixtureModel")

    # output parameters of model
    for i in range(k):
        print("weight = ", gmm.weights[i], "mu = ", gmm.gaussians[i].mu,
              "sigma = ", gmm.gaussians[i].sigma.toArray())

    sc.stop()