Example #1
File: io.py Project: twistzzmc/AlgGeo
def ply2datadict(infile):
    """collect vertex coordinates and normals from input file"""
    datadict = {}
    with open(infile) as f:
        vertexcount = facecount = None
        while True:
            line = f.readline()
            if line.startswith("element vertex"):
                vertexcount = line.split()[-1]
            if line.startswith("element face"):
                facecount = line.split()[-1]
            if line.startswith("end_header"):
                break

        datadict['coords'] = []
        datadict['normals'] = []

        for i in xrange(int(vertexcount)):
            line = f.readline()
            x, y, z = line.split()
            datadict['coords'].append([float(x), float(y), float(z)])

        if facecount is not None:
            datadict['faces'] = []
            for i in xrange(int(facecount)):
                line = f.readline().split()
                vertex_ids = [int(x) for x in line[1:]]
                datadict['faces'].append(vertex_ids)

    return datadict
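
A quick way to sanity-check ply2datadict is to feed it a minimal ASCII PLY file. A smoke-test sketch (the file name tri.ply and the stripped-down header are mine, not part of the original project):

ply_text = """ply
format ascii 1.0
element vertex 3
element face 1
end_header
0 0 0
1 0 0
0 1 0
3 0 1 2
"""
with open('tri.ply', 'w') as f:
    f.write(ply_text)

d = ply2datadict('tri.ply')
print(d['coords'])  # [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [0.0, 1.0, 0.0]]
print(d['faces'])   # [[0, 1, 2]]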
Example #2
import struct
from PIL import Image

def read_image(filename):
    f = open(filename, 'rb')

    index = 0
    buf = f.read()

    f.close()

    magic, images, rows, columns = struct.unpack_from('>IIII', buf, index)
    index += struct.calcsize('>IIII')

    for i in xrange(images):

        image = Image.new('L', (columns, rows))

        for x in xrange(rows):
            for y in xrange(columns):
                image.putpixel((y, x),
                               int(struct.unpack_from('>B', buf, index)[0]))
                index += struct.calcsize('>B')

        print('saving image ' + str(i))
        if 'test' in filename:
            image.save(MINIST_TEST_IMG + str(i) + '.png')
        else:
            image.save(MINIST_TRAIN_IMG + str(i) + '.png')
Example #3
from PIL import ImageDraw

def clearNoise(image, G, N, Z):
    draw = ImageDraw.Draw(image)

    # make Z passes over the interior pixels, repainting any pixel for
    # which getPixel proposes a replacement color
    for i in xrange(0, Z):
        for x in xrange(1, image.size[0] - 1):
            for y in xrange(1, image.size[1] - 1):
                color = getPixel(image, x, y, G, N)
                if color is not None:
                    draw.point((x, y), color)
Example #4
def EuclideanDistanceUpdateRule(V, k, iters=max_iter):
    m, n = V.shape
    W = np.random.random((m, k))
    H = np.random.random((k, n))

    for _ in xrange(iters):
        for a in xrange(k):
            for mu in xrange(n):
                H[a, mu] = H[a, mu] * np.dot(W.T, V)[a, mu] / np.dot(
                    np.dot(W.T, W), H)[a, mu]
            for i in xrange(m):
                W[i, a] = W[i, a] * np.dot(V, H.T)[i, a] / np.dot(
                    np.dot(W, H), H.T)[i, a]
    return W, H
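
The element-wise loops above implement Lee and Seung's multiplicative update rule for the Euclidean cost ||V - W H||^2. For reference, a vectorized sketch of the same rule (my rewrite, not from the original source; it updates each factor all at once instead of entry by entry and adds a small eps to avoid division by zero):

import numpy as np

def nmf_euclidean_vectorized(V, k, iters=100, eps=1e-12):
    m, n = V.shape
    W = np.random.random((m, k))
    H = np.random.random((k, n))
    for _ in range(iters):
        H *= np.dot(W.T, V) / (np.dot(np.dot(W.T, W), H) + eps)
        W *= np.dot(V, H.T) / (np.dot(np.dot(W, H), H.T) + eps)
    return W, H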
Example #5
def DivergenceUpdateRule(V, k, iters=max_iter):
    m, n = V.shape
    W = np.random.random((m, k))
    H = np.random.random((k, n))
    for _ in xrange(iters):
        for a in xrange(k):
            for mu in xrange(n):
                H[a, mu] = H[a, mu] * sum(
                    W[:, a] * V[:, mu] / np.dot(W, H)[:, mu]) / sum(W[:, a])
            for i in xrange(m):
                W[i, a] = W[i, a] * sum(
                    H[a, :] * V[i, :] / np.dot(W, H)[i, :]) / sum(H[a, :])


    # optional normalization (commented out in the original):
    #   H[a, :] / sum(H[a, :])
    #   W[:, a] / sum(W[:, a])
    return W, H
Example #6
    def resize(self, L):
        """ update the number of hash tables to be used """
        if L < self.L:
            self.hash_tables = self.hash_tables[:L]
        else:
            # initialise a new hash table for each new hash function
            hash_funcs = [[self.hash_family.create_hash_func() for h in xrange(self.k)]
                          for l in xrange(self.L, L)]
            self.hash_tables.extend([(g, defaultdict(lambda: [])) for g in hash_funcs])
Example #7
def transform_data(predict, plus, minus):
    for i in xrange(predict.shape[0]):
        if predict[i] == 1:
            predict[i] = plus
        elif predict[i] == -1:
            predict[i] = minus

    return predict
Example #8
def ALSUpdate(V, k, iters=max_iter):
    from scipy.optimize import nnls
    m, n = V.shape
    W = np.random.random((m, k))
    H = np.random.random((k, n))

    # alternately solve nonnegative least squares for H, then for W
    for _ in xrange(iters):
        for mu in xrange(n):
            x1, _ = nnls(W, V[:, mu])
            H[:, mu] = x1
        for i in xrange(m):
            x2, _ = nnls(H.T, V[i, :].T)
            W[i, :] = x2.T
    return W, H
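
A small sanity check for the ALS factorization (my example; max_iter is assumed to be a module-level constant in the original file, so iters is passed explicitly here, and since the function uses xrange it should be run under Python 2 or after aliasing xrange = range):

import numpy as np
V = np.random.random((8, 6))             # nonnegative test matrix
W, H = ALSUpdate(V, k=3, iters=20)
print(np.linalg.norm(V - np.dot(W, H)))  # reconstruction error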
Example #9
    def largestOverlap(self, A, B):
        """
        :type A: List[List[int]]
        :type B: List[List[int]]
        :rtype: int
        """
        listA, listB = [], []
        for i in xrange(len(A)):
            for j in xrange(len(A[i])):
                if A[i][j]:
                    listA.append((i, j))
                if B[i][j]:
                    listB.append((i, j))
        # for every relative shift (ai - bi, aj - bj), count coinciding 1-cells
        difference = defaultdict(int)
        for ai, aj in listA:
            for bi, bj in listB:
                difference[ai - bi, aj - bj] += 1
        return max(difference.values()) if difference else 0
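
Usage check (my example, assuming the method sits on a LeetCode-style Solution class and that defaultdict has been imported from collections); this is the classic LeetCode 835 test case, whose answer is 3:

A = [[1, 1, 0],
     [0, 1, 0],
     [0, 1, 0]]
B = [[0, 0, 0],
     [0, 1, 1],
     [0, 0, 1]]
print(Solution().largestOverlap(A, B))  # 3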
Example #10
def main():
    Train_0, Train_1, Train_2, Train_3, Train_4, Train_5, Train_6, Train_7, Train_8, Train_9 = \
        data_matching(x_train, y_train)

    prediction = []
    np_list = []

    np_predict = [
        Train_0, Train_1, Train_2, Train_3, Train_4, Train_5, Train_6, Train_7,
        Train_8, Train_9
    ]

    combination = list(
        itertools.combinations([0, 1, 2, 3, 4, 5, 6, 7, 8, 9], 2))

    for pair in combination:
        y1, y2 = generate_data(np_predict[pair[0]], np_predict[pair[1]])

        training_data = np.vstack((np_predict[pair[0]], np_predict[pair[1]]))
        test_data = np.hstack((y1, y2))

        clf = SVM(C=1)
        clf.train(training_data, test_data)

        y_predict = clf.predict(X_test)
        np_list.append(transform_data(y_predict, pair[0], pair[1]))

    np_list = np.array(np_list).astype(int)

    transpose = np.transpose(np_list)

    for row in xrange(transpose.shape[0]):
        counts = np.bincount(transpose[row])
        prediction.append(np.argmax(counts))

    prediction = np.array(prediction)

    correct = np.sum(prediction == y_test)

    cnf_matrix = confusion_matrix(y_test, prediction)
    abbreviation = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']

    ax = sns.heatmap(cnf_matrix, cmap=plt.cm.Greens, annot=True, fmt="d")
    ax.set_xticklabels(abbreviation)
    ax.set_yticklabels(abbreviation)
    plt.title('Confusion matrix of SVM_onevsone')
    plt.ylabel('True numbers')
    plt.xlabel('Predicted numbers')
    plt.show()

    size = len(y_predict)
    accuracy = (correct / float(size)) * 100

    print("%d out of %d predictions correct" % (correct, size))
    print("The accuracy is %.2f%%" % accuracy)
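
The bincount/argmax step above is a plain majority vote over the C(10, 2) = 45 pairwise classifiers. A minimal illustration of just that step (my example):

import numpy as np
votes = np.array([[3, 3, 5],
                  [7, 1, 7]])  # each row: one test point's pairwise votes
print([int(np.argmax(np.bincount(row))) for row in votes])  # [3, 7]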
Example #11
def reverseBits2(n):
    """
    :type n: int
    :rtype: int
    """
    res = 0
    for i in xrange(32):
        res <<= 1
        res |= ((n >> i) & 1)
    return res
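
Quick check (my example): the loop moves bit i of n to bit 31 - i of the result, so reversing 1 yields 1 << 31:

print(reverseBits2(1))                         # 2147483648 (== 1 << 31)
print(reverseBits2(0b1010) == (0b0101 << 28))  # True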
Example #12
    def train(self):
        """
        Run optimization to train the model.
        """
        num_train = self.X_train.shape[0]
        iterations_per_epoch = max(num_train // self.batch_size, 1)
        num_iterations = self.num_epochs * iterations_per_epoch

        for t in xrange(num_iterations):
            self._step()

            # Maybe print training loss
            if self.verbose and t % self.print_every == 0:
                print('(Iteration %d / %d) loss: %f' %
                      (t + 1, num_iterations, self.loss_history[-1]))

            # At the end of every epoch, increment the epoch counter and decay the
            # learning rate.
            epoch_end = (t + 1) % iterations_per_epoch == 0
            if epoch_end:
                self.epoch += 1
                for k in self.optim_configs:
                    self.optim_configs[k]['learning_rate'] *= self.lr_decay

            # Check train and val accuracy on the first iteration, the last
            # iteration, and at the end of each epoch.
            first_it = (t == 0)
            last_it = (t == num_iterations - 1)
            if first_it or last_it or epoch_end:
                train_acc = self.check_accuracy(self.X_train,
                                                self.y_train,
                                                num_samples=4)
                val_acc = self.check_accuracy(self.X_val,
                                              self.y_val,
                                              num_samples=4)
                self.train_acc_history.append(train_acc)
                self.val_acc_history.append(val_acc)

                if self.verbose:
                    print('(Epoch %d / %d) train acc: %f; val_acc: %f' %
                          (self.epoch, self.num_epochs, train_acc, val_acc))

                # Keep track of the best model
                if val_acc > self.best_val_acc:
                    self.best_val_acc = val_acc
                    self.best_params = {}
                    for k, v in self.model.params.iteritems():
                        self.best_params[k] = v.copy()

        # At the end of training swap the best params into the model
        self.model.params = self.best_params
Example #13
def similarities_unweighted(adj):
    """Get all the edge similarities. Input dict maps nodes to sets of neighbors.
    Output is a list of decorated edge pairs, (1 - sim, (eij, eik)), ordered by similarity.
    """
    print("computing similarities...")
    i_adj = dict((n, adj[n] | set([n])) for n in adj)  # node -> inclusive neighbors
    min_heap = []  # elements are (1-sim,eij,eik)
    for n in adj:  # n is the shared node
        if len(adj[n]) > 1:
            for i, j in combinations(adj[n], 2):  # all unordered pairs of neighbors
                edge_pair = swap(swap(i, n), swap(j, n))
                inc_ns_i, inc_ns_j = i_adj[i], i_adj[j]  # inclusive neighbors
                S = 1.0 * len(inc_ns_i & inc_ns_j) / len(inc_ns_i | inc_ns_j)  # Jacc similarity...
                heappush(min_heap, (1 - S, edge_pair))
    return [heappop(min_heap) for i in xrange(len(min_heap))]  # return ordered edge pairs
Example #14
def contains_nudity(image_path):
    image = Image.open(image_path)
    imgPixels = image.load()
    width = image.size[0]
    height = image.size[1]
    pixels = [[None] * height for i in range(width)]

    for i in xrange(0, width):
        for j in xrange(0, height):
            pixels[i][j] = Pixel(i, j, imgPixels[i, j][0], imgPixels[i, j][1],
                                 imgPixels[i, j][2])

    skin_pixels = []
    skin_regions = []
    create_skin_regions(pixels, skin_pixels, skin_regions, width, height)

    if len(skin_regions) < 3:
        return False
    skin_regions.sort(key=operator.attrgetter('size'), reverse=True)

    bounding_region = create_bounding_region(pixels, skin_regions, width,
                                             height)
    return analyze_regions(skin_pixels, skin_regions, bounding_region, width,
                           height)
Example #15
    def check_accuracy(self, X, y, num_samples=None, batch_size=2):
        """
        Check accuracy of the model on the provided data.

        Inputs:
        - X: Array of data, of shape (N, d_1, ..., d_k)
        - y: Array of labels, of shape (N,)
        - num_samples: If not None, subsample the data and only test the model
          on num_samples datapoints.
        - batch_size: Split X and y into batches of this size to avoid using too
          much memory.

        Returns:
        - acc: Scalar giving the fraction of instances that were correctly
          classified by the model.
        """

        # Maybe subsample the data
        N = X.shape[0]
        if num_samples is not None and N > num_samples:
            mask = np.random.choice(N, num_samples)
            N = num_samples
            X = X[mask]
            y = y[mask]

        # Compute predictions in batches
        num_batches = N // batch_size
        if N % batch_size != 0:
            num_batches += 1
        y_pred = []
        for i in xrange(num_batches):
            start = i * batch_size
            end = (i + 1) * batch_size
            scores = self.model.loss(X[start:end])
            y_pred.append(np.argmax(scores, axis=1))
        y_pred = np.hstack(y_pred)
        acc = np.mean(y_pred == y)

        return acc
Example #16
def data_matching(x_train, y_train):
    Train_0 = []
    Train_1 = []
    Train_2 = []
    Train_3 = []
    Train_4 = []
    Train_5 = []
    Train_6 = []
    Train_7 = []
    Train_8 = []
    Train_9 = []

    for i in xrange(x_train.shape[0]):
        if y_train[i] == 0:
            Train_0.append(x_train[i])
        elif y_train[i] == 1:
            Train_1.append(x_train[i])
        elif y_train[i] == 2:
            Train_2.append(x_train[i])
        elif y_train[i] == 3:
            Train_3.append(x_train[i])
        elif y_train[i] == 4:
            Train_4.append(x_train[i])
        elif y_train[i] == 5:
            Train_5.append(x_train[i])
        elif y_train[i] == 6:
            Train_6.append(x_train[i])
        elif y_train[i] == 7:
            Train_7.append(x_train[i])
        elif y_train[i] == 8:
            Train_8.append(x_train[i])
        elif y_train[i] == 9:
            Train_9.append(x_train[i])

    return np.array(Train_0), np.array(Train_1), np.array(Train_2), np.array(Train_3), np.array(Train_4),\
           np.array(Train_5), np.array(Train_6), np.array(Train_7), np.array(Train_8), np.array(Train_9)
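
The ten-way elif chain can be written more compactly. An equivalent sketch (my rewrite, returning the same tuple of ten arrays):

import numpy as np

def data_matching_compact(x_train, y_train):
    groups = [[] for _ in range(10)]
    for xi, yi in zip(x_train, y_train):
        groups[int(yi)].append(xi)
    return tuple(np.array(g) for g in groups)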
Example #17
import struct

def read_label(filename, saveFilename):
    f = open(filename, 'rb')
    index = 0
    buf = f.read()

    f.close()

    magic, labels = struct.unpack_from('>II', buf, index)
    index += struct.calcsize('>II')

    labelArr = [0] * labels

    for x in xrange(labels):

        labelArr[x] = int(struct.unpack_from('>B', buf, index)[0])
        index += struct.calcsize('>B')

    save = open(saveFilename, 'w')

    save.write(','.join(map(str, labelArr)))
    save.write('\n')

    save.close()
    print('labels saved successfully')
Example #18
                        correct += 1
                print("{0}\t{1}\t{2}\t{3}".format(L, k, float(correct) / 100, float(lsh.get_avg_touched()) / len(self.points)))

    def linear(self, q, metric, max_results):
        """ brute force search by linear scan """
        candidates = [(ix, metric(q, p)) for ix, p in enumerate(self.points)]
        return sorted(candidates, key=itemgetter(1))[:max_results]


if __name__ == "__main__":

    # create a test dataset of vectors of non-negative integers
    d = 5
    xmax = 20
    num_points = 1000
    points = [[random.randint(0,xmax) for i in xrange(d)] for j in xrange(num_points)]

    # seed the dataset with a fixed number of nearest neighbours
    # within a given small "radius"
    num_neighbours = 2
    radius = 0.1
    for point in points[:num_points]:
        for i in xrange(num_neighbours):
            points.append([x+random.uniform(-radius,radius) for x in point])

    # test lsh versus brute force comparison by running a grid
    # search for the best lsh parameter values for each family
    tester = LSHTester(points,points[:int(num_points/10)],num_neighbours)

    args = {'name':'L2',
            'metric':L2_norm,
Example #19
    def rand_vec(self):
        return [random.gauss(0, 1) for i in xrange(self.d)]
Example #20
import pygame
from pygame.locals import *
from sys import exit
from random import randint

from xlwings import xrange

pygame.init()
screen = pygame.display.set_mode((640, 480), 0, 32)

while True:
    for event in pygame.event.get():
        if event.type == QUIT:
            exit()

    rand_col = (randint(0, 255), randint(0, 255), randint(0, 255))
    # screen.lock()    # you'll soon see what this lock()/unlock() pair is for
    for _ in xrange(100):
        rand_pos = (randint(0, 639), randint(0, 479))
        screen.set_at(rand_pos, rand_col)
    # screen.unlock()

    pygame.display.update()
Example #21
# Setup optimizer
optimizer = optimizers.Adam()
optimizer.setup(model.collect_parameters())

train_loss = []
train_acc = []
test_loss = []
test_acc = []

l1_W = []
l2_W = []
l3_W = []

# Learning loop
for epoch in xrange(1, n_epoch + 1):
    print('epoch', epoch)

    # training
    # shuffle the order of the N training examples
    perm = np.random.permutation(N)
    sum_accuracy = 0
    sum_loss = 0
    # train on the data from 0 to N in minibatches of size batchsize
    for i in xrange(0, N, batchsize):
        x_batch = x_train[perm[i:i + batchsize]]
        y_batch = y_train[perm[i:i + batchsize]]

        # zero the gradients
        optimizer.zero_grads()
        # forward pass to compute the loss and accuracy
Example #22
    def rand_partition(self):
        return [random.uniform(0, self.w) for i in xrange(self.d)]
Example #23
model.add(Activation('tanh'))
model.add(Dense(10, input_dim=10, init='uniform'))
model.add(Activation('tanh'))
model.add(Dense(1, input_dim=10, init='uniform'))
model.add(Activation('tanh'))

sgd = SGD(lr=0.1, decay=1e-3, momentum=0.5, nesterov=True)
model.compile(loss='mse', optimizer=sgd)

model.fit(X, Y, nb_epoch=10, batch_size=1)
score = model.evaluate(X2, Y2, batch_size=1)
preds = model.predict(X2, batch_size=1, verbose=0)

main(Y2, preds)

plt.plot(xrange(0, 231), preds, label='Observed')
plt.plot(xrange(0, 231), Y2, label='Expected')
plt.xlabel('Data Points')
plt.ylabel('PM 2.5')
plt.legend(loc='upper right')
plt.show()

A = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['PM 2.5'])
B = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['T'])
C = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['TM'])
D = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['Tm'])
E = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['SLP'])
F = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['H'])
G = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['VV'])
H = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['VM'])
I = pd.read_csv('Data/Train/Train_Combine.csv', usecols=['V'])
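
Reading the same CSV nine times is wasteful; a single read_csv call can serve all nine columns (my sketch, keeping the single-column DataFrame shape of the originals):

import pandas as pd
cols = ['PM 2.5', 'T', 'TM', 'Tm', 'SLP', 'H', 'VV', 'VM', 'V']
df = pd.read_csv('Data/Train/Train_Combine.csv', usecols=cols)
A, B, C, D, E, F, G, H, I = (df[[c]] for c in cols)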
Example #24
c = []
for a in Y:
    for b in a:
        c.append(b)

clf = svm.SVC()
clf.fit(X, c)
preds = clf.predict(X2)

print("*********SVM***************")
print("Precision : ", precision_score(Y2, preds, average='binary'))
print("Recall : ", recall_score(Y2, preds, average='binary'))
print("F-Measure : ", f1_score(Y2, preds, average='binary'))
a = confusion_matrix(Y2, preds)
sum = 0.0  # note: shadows the builtin sum, mirroring the original source
for i in xrange(len(a)):
    for j in xrange(len(a)):
        if i == j:
            sum += a[i][j]

print("Accuracy : ", (sum / len(Y2)) * 100)

sum = 0.0
# **********************************************************************

abc = LogisticRegression()
abc.fit(X, c)
pred = abc.predict(X2)

print("*********Logistic Regression***************")
print("Precision : ", precision_score(Y2, pred, average='binary'))
Example #25
# sigmoid function
def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


# feature function
def phi(x, y):
    return np.array([x, y, 1])


np.random.seed()  # seed the random number generator
w = np.random.randn(3)
eta = 0.1

for i in xrange(50):
    list_ = list(range(N))
    random.shuffle(list_)

    misses = 0  # number of mispredictions
    for n in list_:
        x_n, y_n = X[n, :]
        t_n = T[n]

        # predict
        feature = phi(x_n, y_n)
        predict = sigmoid(np.inner(w, feature))

        w -= eta * (predict - t_n) * feature

    eta *= 0.9
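
The inner update is stochastic gradient descent on the logistic loss: the per-example gradient with respect to w is (sigmoid(w . phi) - t) * phi. A self-contained toy run of the same rule (my sketch, with made-up linearly separable points):

import numpy as np
np.random.seed(0)
w = np.random.randn(3)
data = [((0.0, 0.1), 0), ((0.2, 0.0), 0), ((1.0, 0.9), 1), ((0.8, 1.1), 1)]
eta = 0.5
for _ in range(200):
    for (x_n, y_n), t_n in data:
        feature = np.array([x_n, y_n, 1.0])
        predict = 1.0 / (1.0 + np.exp(-np.inner(w, feature)))
        w -= eta * (predict - t_n) * feature
print(w)  # a separating weight vector for the two clusters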
Example #26
def L1_norm(u, v):
    return sum(abs(u[i] - v[i]) for i in xrange(len(u)))
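
For reference, the same distance with numpy (my addition, assuming u and v are equal-length sequences):

import numpy as np

def L1_norm_np(u, v):
    return np.abs(np.asarray(u) - np.asarray(v)).sum()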
Example #27
File: io.py Project: twistzzmc/AlgGeo
def datadict2dcel(datadict):
    #assume ccw vertex order
    hedges = {}  # he_id: (v_origin, v_end), f, nextedge, prevedge
    vertices = {}  # v_id: (e0,...,en) i.e. the edges originating from this v

    m = len(datadict['coords'])
    for i in xrange(m):
        vertices[i] = []

    # find all halfedges, keep track of their vertices and faces
    j = 0
    for i, face in enumerate(datadict['faces']):
        # face.reverse()
        n_vertices = len(face)

        for v_i in xrange(n_vertices):
            # store reference to this hedge in vertex list
            vertices[face[v_i]].append(j)

            if v_i == 0:
                hedges[j] = (face[v_i],
                             face[v_i + 1]), i, j + 1, j + (n_vertices - 1)
                vertices[face[v_i + 1]].append(j)
            elif v_i < n_vertices - 1:
                hedges[j] = (face[v_i], face[v_i + 1]), i, j + 1, j - 1
                vertices[face[v_i + 1]].append(j)
            else:
                hedges[j] = (face[v_i],
                             face[0]), i, j - (n_vertices - 1), j - 1
                vertices[face[0]].append(j)
            vertices[face[v_i]].append(j)
            j += 1

    D = DCEL()

    # create vertices for all points
    for v in datadict['coords']:
        dcel_v = D.createVertex(v[0], v[1], v[2])

    # create faces
    for f in xrange(len(datadict['faces'])):
        D.createFace()
    # the last face in the DCEL will be the infinite face:
    infinite_face = D.createInfFace()

    # create all edges except for the ones incident to the infinite face
    for e in xrange(len(hedges)):
        D.createHedge()

    inf_edge = None
    for this_edge, value in hedges.iteritems():
        v, face, nextedge, prevedge = value
        v_origin, v_end = v

        v_origin_edges = Set(vertices[v_origin])
        v_end_edges = Set(vertices[v_end])

        # print v_origin_edges, v_end_edges
        twin_edge = v_origin_edges.intersection(v_end_edges)
        twin_edge.discard(this_edge)

        e = D.hedgeList[this_edge]

        if len(twin_edge) == 0:  # oh that must be incident to infinite face...
            # face = infinite_face
            e_twin = D.createHedge()
            e_twin.setTopology(
                D.vertexList[v_end], e, infinite_face, None,
                None)  # oops, forgetting to set something here...
            inf_edge = e_twin
        else:
            e_twin = D.hedgeList[twin_edge.pop()]
        D.faceList[face].setTopology(e)

        e.setTopology(D.vertexList[v_origin], e_twin, D.faceList[face],
                      D.hedgeList[nextedge], D.hedgeList[prevedge])
        e.origin.setTopology(e)

    # now fix prev/next refs for all edges incident to inf face
    infinite_face.innerComponent = inf_edge
    current_edge = last_correct_edge = inf_edge

    while inf_edge.previous is None:

        current_edge = last_correct_edge
        while current_edge.twin.incidentFace != infinite_face:
            current_edge = current_edge.twin.previous
        current_edge = current_edge.twin

        last_correct_edge.next = current_edge
        current_edge.previous = last_correct_edge
        last_correct_edge = current_edge

    return D
Example #28
#!/usr/bin/env python
# -*- coding: utf-8 -*-

#######################
# Building the world's most perfect robot
#######################

from xlwings import xrange

#
# ls = [1,3,2,9,9,3]
# ls.remove(9)
# print(sorted(ls))
# ls = sorted(ls)
# print(ls)
#

node = [1, 2, 3, 4]
for val in xrange(len(node)):
    print(val)
Example #29
from xlwings import xrange
n = int(input())
board = []
for i in range(n):
    s = input().split(",")
    for j in range(len(s)):
        s[j] = int(s[j])
    board.append(s)
n = len(board)
rcount = ccount = 0
for j in xrange(n):
    if board[0][j] == 1:
        rcount += 1
    if board[j][0] == 1:
        ccount += 1
if n % 2 == 0:
    if rcount != n / 2 or ccount != n / 2:
        print(-1)
else:
    if (rcount != n // 2
            and rcount != n // 2 + 1) or (ccount != n // 2
                                          and ccount != n // 2 + 1):
        print(-1)
for i in xrange(1, n):
    rsame = board[i][0] == board[i - 1][0]
    csame = board[0][i] == board[0][i - 1]
    for j in xrange(1, n):
        if (rsame and board[i][j] != board[i - 1][j]) or (
                not rsame and board[i][j] == board[i - 1][j]):
            print(-1)
        if (csame and board[j][i] != board[j][i - 1]) or (
Example #30
# read json
print('loading json')
data = json.loads(content)
print('success!')

# create file
print('creating csv')
fieldnames = ["Id", "Latitude", "Longitude", "Date"]
w = open('history' + ID + '.csv', 'w', newline='')
wr = csv.writer(w, delimiter=',')
wr.writerow(fieldnames)

count = len(data['locations'])

for c in xrange(0, count):

    # convert current location to standard latitude/longitude
    latitude = float(data['locations'][c]['latitudeE7']) / 10000000
    longitude = float(data['locations'][c]['longitudeE7']) / 10000000
    # convert time stamp from millis to date
    time = float(data['locations'][c]['timestampMs'])

    # get next location, except if this is the last of the list
    if c < count - 1:
        nextLat = float(data['locations'][c + 1]['latitudeE7']) / 10000000
        nextLong = float(data['locations'][c + 1]['longitudeE7']) / 10000000
    else:
        nextLat = latitude
        nextLong = longitude
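
    # The conversion announced by the "millis to date" comment above is not
    # shown in this excerpt; one straightforward way (my sketch, not from the
    # original source):
    import datetime
    date = datetime.datetime.utcfromtimestamp(time / 1000.0).strftime('%Y-%m-%d')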