def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):
        """Train the neural network using mini-batch stochastic
        gradient descent.  The ``training_data`` is a list of tuples
        ``(x, y)`` representing the training inputs and the desired
        outputs.  The other non-optional parameters are
        self-explanatory.  If ``test_data`` is provided then the
        network will be evaluated against the test data after each
        epoch, and partial progress printed out.  This is useful for
        tracking progress, but slows things down substantially."""

        training_data = list(training_data)
        if test_data:
            test_data = list(test_data)
            n_test = len(test_data)
        n = len(training_data)
        for j in xrange(epochs):
            random.shuffle(training_data)
            mini_batches = [
                training_data[k:k + mini_batch_size]
                for k in xrange(0, n, mini_batch_size)
            ]
            for mini_batch in mini_batches:
                self.update_mini_batch(mini_batch, eta)
            if test_data:
                print("Epoch {0}: {1} / {2}".format(j,
                                                    self.evaluate(test_data),
                                                    n_test))
            else:
                print("Epoch {0} complete".format(j))
Example #2
def svm_loss_naive(W, X, y, reg):
    d, C = W.shape
    _, N = X.shape

    ## naive loss and grad
    loss = 0
    dW = np.zeros_like(W)
    for n in xrange(N):
        xn = X[:, n]
        score = W.T.dot(xn)
        for j in xrange(C):
            if j == y[n]:
                continue
            margin = 1 - score[y[n]] + score[j]
            if margin > 0:
                loss += margin
                dW[:, j] += xn
                dW[:, y[n]] -= xn

    loss /= N
    loss += 0.5 * reg * np.sum(W * W)  # regularization

    dW /= N
    dW += reg * W  # gradient of the regularization term
    return loss, dW
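A small smoke test on synthetic data makes the shapes concrete; this sketch assumes Python 2 (or xrange aliased to range) and follows the (d, C) weight and (d, N) data layout used above:

import numpy as np

np.random.seed(0)
d, C, N = 5, 3, 10
W = np.random.randn(d, C)
X = np.random.randn(d, N)
y = np.random.randint(0, C, N)
loss, dW = svm_loss_naive(W, X, y, reg=0.1)
print(loss)      # scalar: averaged hinge loss plus L2 regularization
print(dW.shape)  # (5, 3), same shape as W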
Example #3
def search(max_gens,
           search_space,
           pop_size,
           p_cross,
           p_mut,
           max_local_gens,
           p_local,
           bits_per_param=16):
    pop = []
    for i in xrange(0, pop_size):
        pop.append({
            'bitstring':
            random_bitstring(len(search_space) * bits_per_param)
        })
        fitness(pop[i], search_space, bits_per_param)
    pop.sort(key=lambda x: x['fitness'])
    gen, best = 0, pop[0]
    for gen in xrange(max_gens):
        selected = [binary_tournament(pop) for i in xrange(0, pop_size)]
        children = reproduce(selected, pop_size, p_cross, p_mut)
        pop = create_populations(bits_per_param, children, max_local_gens,
                                 p_mut, pop, search_space, p_local)
        pop.sort(key=lambda x: x['fitness'])
        if pop[0]['fitness'] <= best['fitness']:
            best = pop[0]
        print(" > gen %d, best fitness: %s, best bitstring: %s" %
              (gen, best['fitness'], best['bitstring']))

    return best
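The search loop leans on several helpers defined elsewhere (random_bitstring, fitness, binary_tournament, reproduce, create_populations). Two of the simpler ones might look like the sketches below; these only illustrate the expected behavior and are not the original definitions:

from random import random, randrange

def random_bitstring(num_bits):
    # random string of '0'/'1' characters
    return ''.join('1' if random() < 0.5 else '0' for _ in range(num_bits))

def binary_tournament(pop):
    # pick two distinct individuals at random and keep the fitter one
    i, j = randrange(len(pop)), randrange(len(pop))
    while j == i:
        j = randrange(len(pop))
    return pop[i] if pop[i]['fitness'] < pop[j]['fitness'] else pop[j]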
def question1(s, t):
    # let's make sure s is a string
    if type(s) != str:
        return "Please update the value of s and try again, s doesn't seem to be a string"

    # let's make sure t is also a string
    if type(t) != str:
        return "Please update the value of t and try again, t doesn't seem to be a string"

    if len(s) == 0 or len(s) < len(t):
        return False

    if len(t) == 0:
        return True

    # Storing character counts of t
    char_counts_t = {}
    for i in t:
        if i in char_counts_t:
            char_counts_t[i] += 1
        else:
            char_counts_t[i] = 1

    # store character counts of the first len(t) characters of s
    char_counts_s = {}
    for i in xrange(len(t)):

        if s[i] in char_counts_s:
            char_counts_s[s[i]] += 1
        else:
            char_counts_s[s[i]] = 1

    # use compare() to check whether the first window is an anagram of t
    if compare(char_counts_t, char_counts_s.copy()):
        return True

    # slide the window over the rest of s
    for i in xrange(len(t), len(s)):

        # add new character in the set
        if s[i] in char_counts_s:
            char_counts_s[s[i]] += 1
        else:
            char_counts_s[s[i]] = 1

        # remove old character in the set
        j = i - len(t)
        char_counts_s[s[j]] -= 1
        if char_counts_s[s[j]] == 0:
            char_counts_s.pop(s[j])

        # compare if they are anagrams
        if compare(char_counts_t, char_counts_s.copy()):
            return True

    # no anagram of t appears as a contiguous substring of s
    return False
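Two illustrative calls (hypothetical; they assume the compare helper used above is defined elsewhere). question1 answers whether some anagram of t appears as a contiguous substring of s:

print(question1("udacity", "ad"))   # True  -- "da" occurs in "udacity"
print(question1("udacity", "xyz"))  # False -- no anagram of "xyz" occurs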
def inner_loop(a, gap):
    values = []
    for i in xrange(gap, len(a)):
        iter = 0
        for j in xrange(i, gap - 1, -gap):
            iter += 1
            if a[j - gap] < a[j]:
                break
            a[j], a[j - gap] = a[j - gap], a[j]
        values.append(iter)
    return values
def shellsort(a, v, seq, Show_dict=False, Show_Maximized_Dict=False, Show_Itarations=False, Show_Hist=False):
    """

    :param a: array with constant distribution
    :param v:
    :param seq: 1 - shell
                2 - pratt1
                3 - pratt2
                4 - fibonacci
                5 - sedgewick
    :param Show_dict:
    :param Show_Maximized_Dict:
    :param Show_Itarations:
    :param Show_Hist:
    :return:
    """
    dict = {}
    ks = []
    list_of_dicts = []
    gaps = choose_seq(seq,a)

    for gap in gaps:
        values = []
        for i in xrange(gap, len(a)):
            iter = 0
            for j in xrange(i, gap - 1, -gap):
                iter += 1
                if a[j - gap] < a[j]:
                    break
                a[j], a[j - gap] = a[j - gap], a[j]
            values.append(iter)

        ks.append(gap)
        dict.update({gap: values})
    list_of_dicts.append(dict)

    max_dic = maximize(dict, v)

    if Show_dict is True: print(dict)
    if Show_Maximized_Dict is True: print_maxim_dict(max_dic)

    Ivect = sum_up(max_dic)
    Iorigin = sum_up(dict)
    I = Iorigin / Ivect
    if Show_Itarations is True:
        print('Inner-loop iterations in the non-vectorized code: Iorigin = ' + str(Iorigin))
        print('Inner-loop iterations in the vectorized code: Ivect = ' + str(Ivect))
        print('Iteration-count ratio Iorigin/Ivect = ' + str(I))

    if Show_Hist is True:
        get_splot(max_dic, ks)

    return I
Example #7
def SGD(self, training_data, epochs, mini_batch_size, eta, test_data=None):
    if test_data: n_test = len(test_data)
    n = len(training_data)
    for j in xrange(epochs):
        random.shuffle(training_data)
        mini_batches = [training_data[k:k+mini_batch_size] for k in xrange(0, n, mini_batch_size)]
        for mini_batch in mini_batches:
            self.update_mini_batch(mini_batch, eta)
        if test_data:
            print "Epoch {0}: {1} / {2}".format(j, self.evaluate(test_data), n_test)
        else:
            print "Epoch {0} complete".format(j)
Example #8
 def filter_subList_byList2Str_Regex(list2Str_In, listIntColumns_In, listStr_RegexFilters_In):
     
     strLocation = Witness.WitnessSys.clsStrWitnessLocation + "filter_subList_byList2Str_Regex: "
     listOutput=[]
     for list_Index_1 in xrange(0, len(list2Str_In)):
         listRes = []
         for int_Index_t in xrange(0, len(listIntColumns_In)):
             if len(list2Str_In[list_Index_1])>listIntColumns_In[int_Index_t]:
                 listRes.append(re.match(pattern = listStr_RegexFilters_In[int_Index_t], string = list2Str_In[list_Index_1][listIntColumns_In[int_Index_t]]))
         if len(listRes) and all(listRes):
             #print (strLocation + Witness.WitnessSys.clsStrWitnessValues + " found list at list index: " + str(list_Index_1))   
             listOutput.append(list2Str_In[list_Index_1])
     return listOutput
Example #9
def basic():
    dn = 0
    step = 0
    progress = 0
    while dn < 500:
        tn = sum([x for x in xrange(step + 1)])
        s = [x for x in xrange(1, tn + 1) if tn % x == 0]
        dn = len(s)
        step += 1
        if dn > progress:
            progress = dn
            print("%s: %s\n" % (str(tn), str(progress)))
    print(tn)
 def longestCommonPrefix(self, strs):
     """
     :type strs: List[str]
     :rtype: str
     """
     if not strs:
         return ''
     res = ''
     for i in xrange(len(strs[0])):
         for j in xrange(1, len(strs)):
             if i >= len(strs[j]) or strs[j][i] != strs[0][i]:
                 return res
         res += strs[0][i]
     return res
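Typical usage for this LeetCode-style method, assuming it sits on a Solution class as usual:

print(Solution().longestCommonPrefix(["flower", "flow", "flight"]))  # "fl"
print(Solution().longestCommonPrefix(["dog", "racecar", "car"]))     # "" (no common prefix)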
Example #11
 def updateW(self):
     for n in xrange(self.n_users):
         item_ids, ratings = self.get_items_rated_by_user(n)
         Xn = self.X[item_ids, :]
         grad_wn = -Xn.T.dot(ratings - Xn.dot(self.W[:, n])) / self.n_ratings + \
                   self.lam * self.W[:, n]
         self.W[:, n] -= self.learning_rate * grad_wn.reshape((self.K, ))
Example #12
 def updateX(self):
     for m in xrange(self.n_items):
         user_ids, ratings = self.get_users_who_rate_item(m)
         Wm = self.W[:, user_ids]
         grad_xm = -(ratings - self.X[m, :].dot(Wm)).dot(Wm.T) / self.n_ratings + \
                   self.lam * self.X[m, :]
         self.X[m, :] -= self.learning_rate * grad_xm.reshape((self.K, ))
Example #13
    def normalize_Y(self):
        if self.user_based:
            user_col = 0
            item_col = 1
            n_objects = self.n_users
        else:
            user_col = 1
            item_col = 0
            n_objects = self.n_items

        users = self.Y_raw_data[:, user_col]
        self.mu = np.zeros((n_objects, ))
        for n in xrange(n_objects):
            # row indices of rating done by user n
            # since indices need to be integers, we need to convert
            ids = np.where(users == n)[0].astype(np.int32)
            # indices of all ratings associated with user n
            item_ids = self.Y_data_n[ids, item_col]
            # and the corresponding ratings
            ratings = self.Y_data_n[ids, 2]
            # take mean
            m = np.mean(ratings)
            if np.isnan(m):
                m = 0  # to avoid empty array and nan value
            self.mu[n] = m
            # normalize
            self.Y_data_n[ids, 2] = ratings - self.mu[n]
Example #14
def logistic_regression(features,
                        target,
                        num_steps,
                        learning_rate,
                        add_intercept=False):
    if add_intercept:
        intercept = np.ones((features.shape[0], 1))
        features = np.hstack((intercept, features))

    weights = np.zeros(features.shape[1])

    for step in xrange(num_steps):
        scores = np.dot(features, weights)
        predictions = sigmoid(scores)

        # Update weights with gradient
        output_error_signal = target - predictions
        gradient = np.dot(features.T, output_error_signal)
        weights += learning_rate * gradient

        # Print log-likelihood every so often
        if step % 10000 == 0:
            print(log_likelihood(features, target, weights))

    return weights
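The routine assumes sigmoid and log_likelihood helpers that are not shown; plausible definitions consistent with how they are used above (a sketch, not necessarily the original code):

import numpy as np

def sigmoid(scores):
    return 1.0 / (1.0 + np.exp(-scores))

def log_likelihood(features, target, weights):
    # log-likelihood of the Bernoulli/logit model being fitted
    scores = np.dot(features, weights)
    return np.sum(target * scores - np.log(1 + np.exp(scores)))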
Example #15
def question5(ll, m):
    # ll should be a Node
    if type(ll) != Node:
        return "Failed: ll is not a Node!"

    # m should be an integer
    if type(m) != int:
        return "Failed: m is not an integer!"

    # length of ll
    length_ll = get_length(ll)

    #ll should not be circular
    if length_ll == -1:
        return "Failed: circular linked list!"

    # make sure m is less than or equal to the length of ll
    if length_ll < m:
        return "Failed: m is greater than the length of ll"

    # traverse to the mth element from the end
    current_node = ll
    for i in xrange(length_ll - m):
        current_node = current_node.next

    return current_node.data
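question5 depends on a singly linked list Node class and a get_length helper that returns -1 for circular lists. Minimal sketches of both, written here as assumptions rather than the original definitions:

class Node(object):
    def __init__(self, data):
        self.data = data
        self.next = None

def get_length(ll):
    # walk the list, detecting cycles by node identity
    seen = set()
    length = 0
    current = ll
    while current:
        if id(current) in seen:
            return -1  # circular linked list
        seen.add(id(current))
        length += 1
        current = current.next
    return length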
 def backprop(self, x, y):
     """Return a tuple ``(nabla_b, nabla_w)`` representing the
     gradient for the cost function C_x.  ``nabla_b`` and
     ``nabla_w`` are layer-by-layer lists of numpy arrays, similar
     to ``self.biases`` and ``self.weights``."""
     nabla_b = [np.zeros(b.shape) for b in self.biases]
     nabla_w = [np.zeros(w.shape) for w in self.weights]
     # feedforward
     activation = x
     activations = [x]  # list to store all the activations, layer by layer
     zs = []  # list to store all the z vectors, layer by layer
     for b, w in zip(self.biases, self.weights):
         z = np.dot(w, activation) + b
         zs.append(z)
         activation = sigmoid(z)
         activations.append(activation)
     # backward pass
     delta = self.cost_derivative(activations[-1], y) * \
         sigmoid_prime(zs[-1])
     nabla_b[-1] = delta
     nabla_w[-1] = np.dot(delta, activations[-2].transpose())
     # Note that the variable l in the loop below is used a little
     # differently to the notation in Chapter 2 of the book.  Here,
     # l = 1 means the last layer of neurons, l = 2 is the
     # second-last layer, and so on.  It's a renumbering of the
     # scheme in the book, used here to take advantage of the fact
     # that Python can use negative indices in lists.
     for l in xrange(2, self.num_layers):
         z = zs[-l]
         sp = sigmoid_prime(z)
         delta = np.dot(self.weights[-l + 1].transpose(), delta) * sp
         nabla_b[-l] = delta
         nabla_w[-l] = np.dot(delta, activations[-l - 1].transpose())
     return (nabla_b, nabla_w)
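backprop relies on sigmoid_prime and on the cost_derivative method of the same class; in network code of this style they are usually defined as below (reproduced as a sketch for completeness):

def sigmoid_prime(z):
    # derivative of the sigmoid function
    return sigmoid(z) * (1 - sigmoid(z))

def cost_derivative(self, output_activations, y):
    # partial derivatives dC_x/da for the quadratic cost
    return (output_activations - y)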
Example #17
def point_mutation(bitstring, rate=None):
    if rate is None:
        rate = 1.0 / len(bitstring)
    child = ""
    for i in xrange(0, len(bitstring)):
        bit = bitstring[i]
        child = child + iif(random() < rate, iif(bit == '1', '0', '1'), bit)
    return child
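point_mutation uses an iif helper (a functional if/else); a one-line sketch of what it presumably does:

def iif(condition, true_value, false_value):
    return true_value if condition else false_value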
    def normalize_Y(self):
        users = self.Y_data[:, 0]  # all users - first col of the Y_data
        self.Ybar_data = self.Y_data.copy()
        self.mu = np.zeros((self.n_users, ))
        for n in xrange(self.n_users):
            # row indices of rating done by user n
            # since indices need to be integers, we need to convert
            ids = np.where(users == n)[0].astype(np.int32)
            # indices of all ratings associated with user n
            item_ids = self.Y_data[ids, 1]
            # and the corresponding ratings
            ratings = self.Y_data[ids, 2]
            # take mean
            m = np.mean(ratings)
            if np.isnan(m):
                m = 0  # to avoid empty array and nan value
            self.mu[n] = m
            # normalize
            self.Ybar_data[ids, 2] = ratings - self.mu[n]

        ################################################
        # form the rating matrix as a sparse matrix. Sparsity is important
        # for both memory and computing efficiency. For example, if #user = 1M,
        # #item = 100k, then shape of the rating matrix would be (100k, 1M),
        # you may not have enough memory to store this. Then, instead, we store
        # nonzeros only, and, of course, their locations.
        self.Ybar = sparse.coo_matrix(
            (self.Ybar_data[:, 2],
             (self.Ybar_data[:, 1], self.Ybar_data[:, 0])),
            (self.n_items, self.n_users))
        self.Ybar = self.Ybar.tocsr()
Example #19
def main():
    # https://habr.com/post/317328/

    from numba import cuda
    import numpy as np
    # import matplotlib.pyplot as plt
    from time import time

    from numpy.core.tests.test_mem_overlap import xrange

    n = 512
    blockdim = 16, 16
    griddim = int(n / blockdim[0]), int(n / blockdim[1])

    L = 1.
    h = L / n
    dt = 0.1 * h ** 2
    nstp = 5000

    @cuda.jit("void(float64[:], float64[:])")
    def nextstp_gpu(u0, u):
        i, j = cuda.grid(2)

        u00 = u0[i + n * j]
        if i > 0:
            uim1 = u0[i - 1 + n * j]
        else:
            uim1 = 0.
        if i < n - 1:
            uip1 = u0[i + 1 + n * j]
        else:
            uip1 = 0.
        if j > 0:
            ujm1 = u0[i + n * (j - 1)]
        else:
            ujm1 = 0.
        if j < n - 1:
            ujp1 = u0[i + n * (j + 1)]
        else:
            ujp1 = 1.

        d2x = (uim1 - 2. * u00 + uip1)
        d2y = (ujm1 - 2. * u00 + ujp1)
        u[i + n * j] = u00 + (dt / h / h) * (d2x + d2y)

    u0 = np.full(n * n, 0., dtype=np.float64)
    u = np.full(n * n, 0., dtype=np.float64)

    st = time()

    d_u0 = cuda.to_device(u0)
    d_u = cuda.to_device(u)
    for i in xrange(0, int(nstp / 2)):
        nextstp_gpu[griddim, blockdim](d_u0, d_u)
        nextstp_gpu[griddim, blockdim](d_u, d_u0)

    cuda.synchronize()
    u0 = d_u0.copy_to_host()
    print('time on GPU = ', time() - st)
Example #20
 def fit(self):
     self.normalize_Y()
     for it in xrange(self.max_iter):
         self.updateX()
         self.updateW()
         if (it + 1) % self.print_every == 0:
             rmse_train = self.evaluate_RMSE(self.Y_raw_data)
             print('iter =', it + 1, ', loss =', self.loss(),
                   ', RMSE train =', rmse_train)
Example #21
    def evaluate_RMSE(self, rate_test):
        n_tests = rate_test.shape[0]
        SE = 0  # squared error
        for n in xrange(n_tests):
            pred = self.pred(rate_test[n, 0], rate_test[n, 1])
            SE += (pred - rate_test[n, 2])**2

        RMSE = np.sqrt(SE / n_tests)
        return RMSE
Example #22
def population(count, length, min, max):
    """
    Create a number of individuals (i.e. a population).
    :param count: the number of individuals in the population
    :param length: the number of values per individual
    :param min: the min possible value in an individual's list of values
    :param max: the max possible value in an individual's list of values
    :return:
    """
    return [individual(length, min, max) for x in xrange(count)]
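The individual helper is assumed; a sketch consistent with the docstring (one individual is length random integers between min and max):

from random import randint

def individual(length, min, max):
    # create one member of the population
    return [randint(min, max) for _ in xrange(length)]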
Example #23
def evaluate(Yhat, rates, W, b):
    se = 0
    cnt = 0
    for n in xrange(n_users):
        ids, scores_truth = get_items_rated_by_user(rates, n)
        scores_pred = Yhat[ids, n]
        e = scores_truth - scores_pred
        se += (e * e).sum(axis=0)
        cnt += e.size
    return sqrt(se / cnt)
Example #24
def crossover(parent1, parent2, rate):
    if random() >= rate:
        return parent1
    child = ""
    for i in xrange(0, len(parent1)):
        if random() < 0.5:
            child += parent1[i]
        else:
            child += parent2[i]
    return child
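An illustrative call (hypothetical bitstrings; random must be imported as in the other snippets). With a high crossover rate the parents' bits are mixed uniformly, otherwise parent1 is returned unchanged:

child = crossover("101010", "010101", rate=0.98)
print(child)  # e.g. "011010" -- varies from run to run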
Example #25
def biKmeans(dataSet, k):
    numSamples = dataSet.shape[0]
    # first column stores which cluster this sample belongs to,
    # second column stores the error between this sample and its centroid
    clusterAssment = mat(zeros((numSamples, 2)))

    # step 1: the init cluster is the whole data set
    centroid = mean(dataSet, axis=0).tolist()[0]
    centList = [centroid]
    for i in xrange(numSamples):
        clusterAssment[i, 1] = euclDistance(mat(centroid), dataSet[i, :])**2

    while len(centList) < k:
        # min sum of square error
        minSSE = 100000.0
        numCurrCluster = len(centList)
        # for each cluster
        for i in range(numCurrCluster):
            # step 2: get samples in cluster i
            pointsInCurrCluster = dataSet[nonzero(
                clusterAssment[:, 0].A == i)[0], :]

            # step 3: cluster it to 2 sub-clusters using k-means
            centroids, splitClusterAssment = kmeans(pointsInCurrCluster, 2)

            # step 4: calculate the sum of square error after split this cluster
            splitSSE = sum(splitClusterAssment[:, 1])
            notSplitSSE = sum(
                clusterAssment[nonzero(clusterAssment[:, 0].A != i)[0], 1])
            currSplitSSE = splitSSE + notSplitSSE

            # step 5: find the best split cluster which has the min sum of square error
            if currSplitSSE < minSSE:
                minSSE = currSplitSSE
                bestCentroidToSplit = i
                bestNewCentroids = centroids.copy()
                bestClusterAssment = splitClusterAssment.copy()

        # step 6: modify the cluster index for adding new cluster
        bestClusterAssment[nonzero(bestClusterAssment[:, 0].A == 1)[0],
                           0] = numCurrCluster
        bestClusterAssment[nonzero(bestClusterAssment[:, 0].A == 0)[0],
                           0] = bestCentroidToSplit

        # step 7: update and append the centroids of the new 2 sub-cluster
        centList[bestCentroidToSplit] = bestNewCentroids[0, :]
        centList.append(bestNewCentroids[1, :])

        # step 8: update the index and error of the samples whose cluster have been changed
        clusterAssment[nonzero(clusterAssment[:, 0].A ==
                               bestCentroidToSplit), :] = bestClusterAssment

    print('Congratulations, cluster using bi-kmeans complete!')
    return mat(centList), clusterAssment
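biKmeans relies on a basic kmeans routine and a euclDistance helper; the distance function used for the per-sample error column presumably looks like this sketch:

from numpy import power, sqrt, sum

def euclDistance(vector1, vector2):
    # Euclidean distance between two vectors/matrices of the same shape
    return sqrt(sum(power(vector2 - vector1, 2)))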
Example #26
def tensorflowProcess(clustersNumber,
                      dataLessTarget,
                      datasetName,
                      runinfo=None,
                      initialClusters=None):
    import tensorflow as tf
    from numpy.core.tests.test_mem_overlap import xrange
    '''
    https://www.tensorflow.org/api_docs/python/tf/contrib/factorization/KMeansClustering
    '''
    outputFile = datasetOutFile(datasetName, TENSORFLOW_ALGO, runinfo=runinfo)
    clustersOutputFile = datasetOutFile(datasetName,
                                        centroidFor(TENSORFLOW_ALGO),
                                        runinfo=runinfo)

    if os.path.exists(outputFile) and os.path.exists(clustersOutputFile):
        print("tensorflow skipped")
        return

    points = dataLessTarget.values

    def input_fn():
        return tf.train.limit_epochs(tf.convert_to_tensor(points,
                                                          dtype=tf.float32),
                                     num_epochs=1)

    if initialClusters is None:
        kmeans = tf.contrib.factorization.KMeansClustering(
            num_clusters=clustersNumber, use_mini_batch=False)
    else:
        kmeans = tf.contrib.factorization.KMeansClustering(
            num_clusters=clustersNumber,
            initial_clusters=initialClusters,
            use_mini_batch=False)

    # train
    num_iterations = 10
    previous_centers = None
    for _ in xrange(num_iterations):
        kmeans.train(input_fn)

    with open(outputFile, 'w') as csvfile:
        filewriter = csv.writer(csvfile, quoting=csv.QUOTE_MINIMAL)

        cluster_indices = list(kmeans.predict_cluster_index(input_fn))
        for index, point in enumerate(points):
            cluster_index = cluster_indices[index]
            filewriter.writerow([index, cluster_index])

    # Clusters saving
    with open(clustersOutputFile, 'w') as clusterFile:
        filewriter = csv.writer(clusterFile, quoting=csv.QUOTE_MINIMAL)

        for row in kmeans.cluster_centers():
            filewriter.writerow(row.tolist())
def _process_image_files(name, filenames, texts, labels, num_shards):
    """Process and save list of images as TFRecord of Example protos.
    
      Args:
        name: string, unique identifier specifying the data set
        filenames: list of strings; each string is a path to an image file
        texts: list of strings; each string is human readable, e.g. 'dog'
        labels: list of integer; each integer identifies the ground truth
        num_shards: integer number of shards for this data set.
      """
    assert len(filenames) == len(texts)
    assert len(filenames) == len(labels)
    
    # Break all images into batches with a [ranges[i][0], ranges[i][1]].
    spacing = np.linspace(0, len(filenames), FLAGS.num_threads + 1).astype(np.int)
    ranges = []
    threads = []
    for i in xrange(len(spacing) - 1):
        ranges.append([spacing[i], spacing[i+1]])
    # Launch a thread for each batch.
    print('Launching %d threads for spacings: %s' % (FLAGS.num_threads, ranges))
    sys.stdout.flush()
    
    # Create a mechanism for monitoring when all threads are finished.
    coord = tf.train.Coordinator()
    
    # Create a generic TensorFlow-based utility for converting all image codings.
    coder = ImageCoder()
    
    threads = []
    for thread_index in xrange(len(ranges)):
        args = (coder, thread_index, ranges, name, filenames,
                texts, labels, num_shards)
        t = threading.Thread(target=_process_image_files_batch, args=args)
        t.start()
        threads.append(t)
    
    # Wait for all the threads to terminate.
    coord.join(threads)
    print('%s: Finished writing all %d images in data set.' %
            (datetime.now(), len(filenames)))
    sys.stdout.flush()
Example #28
def prob_select(choices):
    xsum = sum(map(lambda x: x['prob'], choices))
    if xsum == 0.0:
        return choices[random.randrange(len(choices))]['city']
    v = random.random()
    for i in xrange(len(choices)):
        choice = choices[i]
        v -= (choice['prob'] / xsum)
        if v <= 0.0:
            return choice['city']
    return choices[-1]['city']
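An illustrative call with hypothetical data (the snippet assumes the standard random module is imported, since it calls random.random and random.randrange). Each choice carries a city index and a selection weight, and prob_select samples proportionally to 'prob':

choices = [{'city': 0, 'prob': 0.2},
           {'city': 1, 'prob': 0.5},
           {'city': 2, 'prob': 0.3}]
print(prob_select(choices))  # most often prints 1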
Example #29
def local_update_pheromone(pheromone, cand, c_local_phero, init_phero):
    for i in xrange(len(cand['vector'])):
        x = cand['vector'][i]
        if i == len(cand['vector']) - 1:
            y = cand['vector'][0]
        else:
            y = cand['vector'][i + 1]
        value = ((1.0 - c_local_phero) * pheromone[x][y]) + \
                (c_local_phero * init_phero)
        pheromone[x][y] = value
        pheromone[y][x] = value
Example #30
    def pred_for_user(self, user_id):
        ids = np.where(self.Y_data_n[:, 0] == user_id)[0]
        items_rated_by_u = self.Y_data_n[ids, 1].tolist()

        y_pred = self.X.dot(self.W[:, user_id]) + self.mu[user_id]
        predicted_ratings = []
        for i in xrange(self.n_items):
            if i not in items_rated_by_u:
                predicted_ratings.append((i, y_pred[i]))

        return predicted_ratings
count = 0
tempArr = [10]
for line in trainingData:
    tempArr = line.split(",")
    trainingOutArray[count] = float(tempArr[10])
    column = 1
    while column <= 9:
        trainingDataArray[count][column - 1] = float(tempArr[column])
        column = column + 1
    count = count + 1
    if count >= h:
        break

trainingDataArray = np.array(trainingDataArray)
trainingOutArray = np.array(trainingOutArray).T

syn0 = 2*np.random.random((w,h))
syn1 = 2*np.random.random((h,))

for j in xrange(100):
    l1 = 1/(1+np.exp(-(np.dot(trainingDataArray,syn0))))
    l2 = 1/(1+np.exp(-(np.dot(l1,syn1))))
    l2_delta = (trainingOutArray - l2)*(l2*(1-l2))
    l1_delta = np.outer(l2_delta, syn1) * (l1 * (1 - l1))  # backprop the error through the hidden layer
    syn1 += l1.T.dot(l2_delta)
    syn0 += trainingDataArray.T.dot(l1_delta)

print(trainingOutArray)
print(l2)

print(l2.shape)