Example #1
def getgooglemapping(fname, source, target):
    """
    Given a filename and source and target languages, this gathers words from
    the sixth tab-separated column (index 5) of each row in fname and creates a
    word mapping from source to target. Note that if Google cannot find a
    translation, it simply returns the word as is.
    
    Language codes are Google two letter codes (en, uz, tr, de, etc.)

    Results are stored in python shelves.
    """

    service = build('translate', 'v2', developerKey=API_KEY)
    
    memo = shelve.open("shelves/translatedict-" + source + "-" + target + ".shelf")

    with codecs.open(fname, "r", "utf-8") as f:
        lines = f.readlines()        

    words = []
    chars = 0
    
    for line in lines:            
        sline = line.split("\t")
        if len(sline) > 5:
            srcword = sline[5].strip()
            if srcword not in memo:
                words.append(srcword)
                chars += len(srcword)

    # get the cost, fail if user refuses.
    utils.cost(chars)
    
    # translate the gathered words in batches.
    batch_size = 20
    for i in range(0, len(words), batch_size):
        # request a fixed-size batch at a time to stay under API limits.
        iwords = words[i:i + batch_size]
        print("size of request:",len(iwords))
        try:
            response = service.translations().list(source=source,target=target, q=iwords).execute()
            if len(response["translations"]) > 0:
                translations = response["translations"]
                for w,t in zip(iwords,translations):
                    tword = t["translatedText"]
                    memo[w] = tword
            else:
                print("No translations in returned list...")

        except Exception as e:
            print("Whoops... exception")
            print(e)
            import traceback
            traceback.print_exc()
            

    ret = dict(memo)
    memo.close()
    return ret
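A minimal, self-contained sketch of the batching idiom used above, keeping the stride and the slice width equal so no word is skipped or re-sent; the chunked helper and the sample word list are illustrative, not part of the original module:

def chunked(items, size):
    """Yield consecutive, non-overlapping slices of at most `size` items."""
    for i in range(0, len(items), size):
        yield items[i:i + size]

# illustrative usage: send a word list to the API in groups of two
words = ["alpha", "beta", "gamma", "delta", "epsilon"]
for batch in chunked(words, 2):
    print("size of request:", len(batch))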
Example #2
    def test_cost(self):
        self.assertEqual(cost(formulas=read_sample('x, y\n1\n, 4'),
                              relation=read_sample('x, y\n1, 2\n1, 4\n1, 4')),
                         3)

        self.assertEqual(cost(formulas=read_sample('x\n{1}\n{4}'),
                              relation=read_sample('x\n{1 2}\n{1 4}\n{1 4}')),
                         3)

        self.assertEqual(cost(formulas=read_sample('x,y,z\na,b,\n,,c'),
                              relation=read_sample(three_attr_null)),
                         6)
Example #3
def align_nw_2by2(seq1, seq2):
    """ Perform 2 by 2 sequence alignment with the Needleman and Wunsch algorithm.
    Parameters:
    ----------
    seq1: string of characters of len=n1
        sequence of nucleotides
    seq2: string of characters of len=n2
        sequence of nucleotides
    Returns:
    -------
    scores: matrix of floats of dim (n1+1, n2+1)
        Needleman-Wunsch score matrix
    paths: matrix of floats of dim (n1+1, n2+1)
        traceback directions (1: gap in seq1, 2: diagonal/substitution, 3: gap in seq2)
    aligned: list of 2 strings
        contains the aligned seq1 and seq2 respectively 
    L: list of int
        list of the positions of the inserted gaps
    """
    n1, n2 = len(seq1), len(seq2)
    # initialization: path matrix, score matrix, aligned sequences list, gaps list
    paths = np.zeros((n1 + 1, n2 + 1))
    scores = np.zeros((n1 + 1, n2 + 1))
    aligned = ["", ""]
    L = []
    for i in range(1, n1 + 1):  # browsing seq1 indexes
        scores[i, 0] = scores[i - 1, 0] - 3
        paths[i, 0] = 3
        for j in range(1, n2 + 1):  # browsing seq2 indexes
            scores[0, j] = scores[0, j - 1] - 3
            paths[0, j] = 1
            c1 = scores[i - 1, j - 1] + cost(seq1[i - 1], seq2[j - 1])
            c2 = scores[i - 1, j] - 3
            c3 = scores[i, j - 1] - 3
            scores[i, j] = max(c1, c2, c3)
            if scores[i, j] == c1:
                paths[i, j] = 2
            elif scores[i, j] == c2:
                paths[i, j] = 3
            elif scores[i, j] == c3:
                paths[i, j] = 1

    while i != 0 or j != 0:
        if paths[i, j] == 1:
            aligned[0] += '_'
            aligned[1] += seq2[j - 1]
            j = j - 1
        elif paths[i, j] == 2:
            aligned[0] += seq1[i - 1]
            aligned[1] += seq2[j - 1]
            j = j - 1
            i = i - 1
        elif paths[i, j] == 3:
            aligned[0] += seq1[i - 1]
            aligned[1] += '_'
            L.append(j)  # save gaps introduced by alignment
            i = i - 1
    aligned[0] = aligned[0][::-1]
    aligned[1] = aligned[1][::-1]
    return scores, paths, aligned, L
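A minimal, self-contained sketch of the same scoring recurrence on two short sequences; the substitution scores (+2 match, -1 mismatch) are assumptions standing in for the original cost(), while the gap penalty of -3 matches the code above:

import numpy as np

def simple_cost(a, b):
    # assumed substitution score: +2 for a match, -1 for a mismatch
    return 2 if a == b else -1

seq1, seq2 = "GATT", "GCAT"
n1, n2 = len(seq1), len(seq2)
scores = np.zeros((n1 + 1, n2 + 1))
scores[1:, 0] = -3 * np.arange(1, n1 + 1)   # gap column
scores[0, 1:] = -3 * np.arange(1, n2 + 1)   # gap row
for i in range(1, n1 + 1):
    for j in range(1, n2 + 1):
        scores[i, j] = max(scores[i - 1, j - 1] + simple_cost(seq1[i - 1], seq2[j - 1]),
                           scores[i - 1, j] - 3,
                           scores[i, j - 1] - 3)
print(scores[n1, n2])   # optimal global alignment score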
Example #4
    def train(self):
        self.costs = []
        gradients = []

        for i, (char, target) in enumerate(zip(self.text, self.text[1:])):
            self.x = expand(np.eye(self.x_size)[self.char_to_i[char]])
            self.target = expand(np.eye(self.x_size)[self.char_to_i[target]])

            self.y = self.forward_pass()
            gradients.append(self.backward_pass())

            if (i + 1) % self.batch_size == 0:
                dCdWxh, dCdWhh, dCdWhy, dCdBh, dCdBy = np.average(gradients,
                                                                  axis=0)
                self.W_xh -= self.learning_rate * dCdWxh
                self.W_hh -= self.learning_rate * dCdWhh
                self.W_hy -= self.learning_rate * dCdWhy
                self.B_h -= self.learning_rate * dCdBh.T
                self.B_y -= self.learning_rate * dCdBy.T
                gradients = []  # reset the accumulator so each update averages one batch

            self.costs.append(cost(self.target, self.y))
            self.diagnose(i)

        plt.plot(self.costs)
        plt.show(block=False)
        time.sleep(1000)
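A minimal sketch of the mini-batch pattern used in train(): per-step gradients are accumulated, averaged every batch_size steps, applied, and the accumulator is cleared. The parameter shape and the random stand-in gradients are illustrative:

import numpy as np

batch_size, learning_rate = 4, 0.1
W = np.zeros((3, 3))            # illustrative parameter
gradients = []

for step in range(12):
    grad = np.random.randn(3, 3)            # stand-in for backward_pass()
    gradients.append(grad)
    if (step + 1) % batch_size == 0:
        W -= learning_rate * np.average(gradients, axis=0)
        gradients = []                       # reset for the next batch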
Example #5
def generate_route(i: int,
                   T: np.ndarray,
                   Tp: np.ndarray,
                   routes: np.ndarray,
                   unvisited: np.ndarray,
                   routes_distance: np.ndarray,
                   distances: np.ndarray,
                   cost: callable = cost):
    """ Generates a route and stores it in the i-th row
        of routes. Route distance are also updated.

        Args:
            i: id of route generated, must be < routes.shape[0]
            T: current Transition matrix
            Tp: Transition matrix companion for copies
            routes: table containing all routes
            unvisited: table containing boolean indicating whether 
                       each city has been visited in current route
            routes_distance: table containing distances associated to each route
            distances: distance matrix between cities
            cost: function to compute a route's distance.
        
        Returns:
            None

    """
    assert i < routes.shape[0], "Route index is out of range"
    #-- Restore Tp's values to T's
    Tp[:, :] = T[:, :]
    #-- Restore unvisited cities
    unvisited[:] = True
    unvisited[0] = False
    #-- Iterate to generate visits
    for k in range(T.shape[0] - 2):
        #- Prevent transitioning back to the current state.
        # This should be an exact 0, but a true 0 causes numba errors in the
        # multinomial sampling; investigate further or move out of numba.
        Tp[:, routes[i, k]] = 0.00000000001
        #- RE-normalize rows
        # Be numba friendly
        for row in range(Tp.shape[0]):
            Tp[row, :] = Tp[row, :] / np.sum(Tp[row, :])
        # row_sums = Tp.sum(axis=1)
        # Tp = Tp / row_sums[:, np.newaxis]

        #- Sample next city to visit
        draw = np.random.multinomial(1, Tp[routes[i, k], :], 1)
        next_visit = np.where(draw == 1)[1][0]
        routes[i, k + 1] = int(next_visit)
        #- Update unvisited state
        unvisited[int(next_visit)] = False
    #-- Assign last visit
    routes[i, k + 2] = np.where(unvisited)[0][0]
    #-- Update distance table
    routes_distance[i] = cost(routes, distances, i)
    return None
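A minimal, self-contained sketch of the sampling step above: the current city's column is masked with a tiny value (mirroring the numba workaround), rows are renormalized, and the next city is drawn with np.random.multinomial. The 4-city transition matrix is illustrative:

import numpy as np

T = np.full((4, 4), 0.25)           # illustrative transition matrix
current = 0
Tp = T.copy()
Tp[:, current] = 1e-11              # mask transitions back to the current city
Tp = Tp / Tp.sum(axis=1, keepdims=True)    # renormalize each row
draw = np.random.multinomial(1, Tp[current, :], 1)
next_visit = np.where(draw == 1)[1][0]     # column index of the sampled city
print(next_visit)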
Example #6
 def fit_sgd(self, Y, R):
     n_jokes = Y.shape[0]
     n_users = Y.shape[1]
     X, Theta = utils.init_par(n_users, n_jokes, self.n_features)
     start = time.time()
     for i in range(self.n_iter):
         
         X, Theta = utils.sgd(X, Theta, Y, self.lamb, R, init_learning_rate=self.learning_rate, max_iter=8)
         J = utils.cost(X, Theta, Y, self.lamb, R)
         print('cost: ' + str(J),', n_iter: '+str(i))
         if J < 200:
             break
     self.features = X
     self.coef = Theta
     self.cost = utils.cost(X, Theta, Y, self.lamb, R)
     end = time.time()
     self.train_time = end-start
     print('final cost: '+ str(self.cost),'\n'
           'train time: '+str(self.train_time))
     return
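For reference, a minimal sketch of the kind of regularized squared-error objective such a factorization usually minimizes; this is an assumed form, and the repository's utils.cost may differ:

import numpy as np

def mf_cost(X, Theta, Y, lamb, R):
    # squared error over observed entries (R == 1) plus L2 regularization (assumed form)
    err = (X @ Theta.T - Y) * R
    return 0.5 * np.sum(err ** 2) + 0.5 * lamb * (np.sum(X ** 2) + np.sum(Theta ** 2))

# toy usage with random data
n_jokes, n_users, k = 5, 3, 2
Y = np.random.randn(n_jokes, n_users)
R = (np.random.rand(n_jokes, n_users) > 0.5).astype(float)
X = np.random.randn(n_jokes, k)
Theta = np.random.randn(n_users, k)
print(mf_cost(X, Theta, Y, lamb=0.1, R=R))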
Example #7
def test(loader, trainable, log):
    in_patch_size = args.patch_size

    sample_patch_size = in_patch_size * args.patch_scale
    sample_max_offset = args.max_offset * args.patch_scale

    axy1 = sample_patch_size // 2 - sample_max_offset
    axy2 = sample_patch_size // 2 + sample_max_offset

    for batch_index, (sample_patch_a, sample_patch_b, sample_gt) in enumerate(loader):
        sample_patch_a = sample_patch_a.cuda()
        sample_patch_b = sample_patch_b.cuda()

        in_patch_a = F.interpolate(sample_patch_a, size=[in_patch_size, in_patch_size], mode='bilinear', align_corners=True)
        in_patch_b = F.interpolate(sample_patch_b, size=[in_patch_size, in_patch_size], mode='bilinear', align_corners=True)

        predictions = trainable(in_patch_a, in_patch_b)

        losses_xys = predictions[:, :2] - sample_gt.cuda()
        losses_xys_2 = losses_xys * losses_xys
        losses_xys_sample = losses_xys_2.sum(dim=1)

        for i in range(0, predictions.size()[0]):
            gts = [round(y, 4) for y in sample_gt[i].tolist()]
            xys = [round(p, 4) for p in losses_xys[i].tolist()]
            ps = [round(p, 4) for p in predictions[i].tolist()]
            log.info('Loss: {:.4f} GT:{} XY loss:{} Prediction:{}'.format(losses_xys_sample[i], gts, xys, ps))

            if True or losses_xys_sample[i] > 0.05:  # 'True or' forces plotting for every sample; drop it to plot only high-loss cases
                cropped_patch_a = sample_patch_a[i, :, axy1:axy2, axy1:axy2]
                cost, mini, maxi = utils.cost(cropped_patch_a, sample_patch_b[i])


                fig = plt.figure(figsize=(12, 12), dpi=112)
                ax1 = fig.add_subplot(221)
                ax1.imshow(in_patch_a[i].squeeze(0).cpu(), cmap='gray')
                ax2 = fig.add_subplot(222)
                ax2.imshow(in_patch_b[i].squeeze(0).cpu(), cmap='gray')
                ax3 = fig.add_subplot(223)
                ax3.imshow(sample_patch_a[i].squeeze(0).cpu(), cmap='gray')
                ax4 = fig.add_subplot(224)
                ax4.imshow(cost, cmap='gray')
                gt = sample_gt[i] * sample_max_offset + sample_max_offset + args.patch_scale // 2
                ax4.plot(gt[0], gt[1], marker='x', color="green")
                xy = predictions[i, :2].cpu().detach() * sample_max_offset + sample_max_offset + args.patch_scale // 2
                ax4.plot(xy[0], xy[1], marker='x', color="red")
                err = predictions[i, 2:4].cpu().detach().abs() * sample_max_offset * 4
                ax4.add_patch(Ellipse((xy[0], xy[1]), width=err[0], height=err[1],
                     edgecolor='red',
                     facecolor='none',
                     linewidth=1))
                plt.show()
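A minimal sketch of the bilinear downsampling step that produces the network inputs above (assumes PyTorch is available; the tensor sizes are illustrative):

import torch
import torch.nn.functional as F

sample_patch = torch.randn(1, 1, 128, 128)        # (batch, channels, H, W)
in_patch = F.interpolate(sample_patch, size=[64, 64],
                         mode='bilinear', align_corners=True)
print(in_patch.shape)   # torch.Size([1, 1, 64, 64])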
Example #8
    def fit(self,
            X,
            Y,
            learning_rate=10e-8,
            reg=10e-8,
            epochs=10000,
            show_figure=False):

        X, Y = shuffle(X, Y)
        K = len(set(Y))
        Xvalid, Yvalid = X[-1000:], Y[-1000:]
        Tvalid = y2indicator(Yvalid, K)
        X, Y = X[:-1000], Y[:-1000]

        N, D = X.shape

        T = y2indicator(Y, K)
        self.W1 = np.random.randn(D, self.M) / np.sqrt(D + self.M)
        self.b1 = np.zeros(self.M)

        self.W2 = np.random.randn(self.M, K) / np.sqrt(self.M + K)
        self.b2 = np.zeros(K)

        costs = []
        best_validation_error = 1
        for i in range(epochs):
            pY, Z = self.forward(X)
            # gradient descent step
            self.W2 -= learning_rate * (Z.T.dot(pY - T) + reg * self.W2)
            self.b2 -= learning_rate * ((pY - T).sum(axis=0) + reg * self.b2)

            self.W1 -= learning_rate * (X.T.dot(
                (pY - T).dot(self.W2.T) * Z * (1 - Z)) + reg * self.W1)
            self.b1 -= learning_rate * (((pY - T).dot(self.W2.T) * Z *
                                         (1 - Z)).sum(axis=0) + reg * self.b1)

            if i % 10 == 0:
                pYvalid, Zvalid = self.forward(Xvalid)

                c = cost(Tvalid, pYvalid)
                costs.append(c)
                e = error_rate(Yvalid, np.argmax(pYvalid, axis=1))

                print "i", i, "cost:", c, "error", e
                if e < best_validation_error:
                    best_validation_error = e
        print "best_validation_error:", best_validation_error

        if show_figure:
            plt.plot(costs)
            plt.show()
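y2indicator is used here and in the next example to one-hot encode integer labels; a minimal sketch of a typical implementation, labeled as an assumption since the project's own helper is not shown:

import numpy as np

def y2indicator(y, K):
    # one-hot encode integer labels y of shape (N,) into an (N, K) indicator matrix
    N = len(y)
    T = np.zeros((N, K))
    T[np.arange(N), y] = 1
    return T

print(y2indicator(np.array([0, 2, 1]), K=3))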
Example #9
    def fit(self,
            X,
            Y,
            learning_rate=10e-6,
            reg=10e-7,
            epochs=1000,
            show_figure=False):
        X, Y = shuffle(X, Y)
        x_valid = X[-10:]
        y_valid = Y[-10:]
        t_valid = utils.y2indicator(y_valid)

        x = X[:-10]
        y = Y[:-10]
        t = utils.y2indicator(y)

        N, D = x.shape
        K = len(set(y))

        self.W1 = np.random.randn(D, self.M)
        self.b1 = np.random.randn(self.M)

        self.W2 = np.random.randn(self.M, K)
        self.b2 = np.random.randn(K)

        costs = []

        for i in range(epochs):
            pY, Z = self.forward(x)

            # update weights; delta is the output-layer error (avoids shadowing D = n_features)
            delta = pY - t
            self.W2 -= learning_rate * (Z.T.dot(delta) + reg * self.W2)
            self.b2 -= learning_rate * (delta.sum(axis=0) + reg * self.b2)

            dZ = delta.dot(self.W2.T) * Z * (1 - Z)
            self.W1 -= learning_rate * (x.T.dot(dZ) + reg * self.W1)
            self.b1 -= learning_rate * (dZ.sum(axis=0) + reg * self.b1)

            if i % 10 == 0:
                pY_valid, _ = self.forward(x_valid)
                c = utils.cost(t_valid, pY_valid)
                costs.append(c)
                e = utils.error_rate(y_valid, np.argmax(pY_valid, axis=1))
                print("i:", i, " cost: ", c, " error: ", e)

        if show_figure:
            plt.plot(costs)
            plt.show()
Example #10
    def fit(self, X, y, plot_cost=False):
        X_train, Y_train, X_test, Y_test = get_train_test(X,
                                                          y,
                                                          percent_train=0.7)
        n, d = X_train.shape
        k = Y_train.shape[1]

        self.W1, self.b1 = init_weight_bias(d, self.hidden_layer_sizes[0])
        self.W2, self.b2 = init_weight_bias(self.hidden_layer_sizes[0], k)
        costs = []
        best_validation_error = 1

        if (self.batch_size == 'auto'):
            self.batch_size = min(200, n)

        num_batches = int(n / self.batch_size)

        for i in range(self.max_iter):
            X_shuffled, Y_shuffled = shuffle(X_train, Y_train)
            for j in range(num_batches):
                X_temp = X_shuffled[j * self.batch_size:(j + 1) * self.batch_size, :]
                Y_temp = Y_shuffled[j * self.batch_size:(j + 1) * self.batch_size, :]
                Ypred, Z1 = self.forward(X_temp)

                pY_t = Ypred - Y_temp
                self.W2 -= self.learning_rate_init * (Z1.T.dot(pY_t))
                self.b2 -= self.learning_rate_init * (pY_t.sum(axis=0))
                dZ = pY_t.dot(self.W2.T) * (Z1 > 0)
                self.W1 -= self.learning_rate_init * X_temp.T.dot(dZ)
                self.b1 -= self.learning_rate_init * dZ.sum(axis=0)

            if (i % 2) == 0:
                pY_test, _ = self.forward(X_test)
                c = cost(Y_test, pY_test)
                costs.append(c)
                e = error_rate(Y_test.argmax(axis=1), pY_test.argmax(axis=1))
                print('Iteration', i, 'Cost:', c, 'Error Rate:', e)
                if e < best_validation_error:
                    best_validation_error = e
        print("best_validation_error:", best_validation_error)

        if plot_cost:
            plt.plot(costs)
            plt.show()
Example #11
def test(DCN, gen):
    accuracies_test = [[] for ii in gen.clusters['test']]
    iterations_te = int(gen.num_examples_test / batch_size)
    for it in range(iterations_te):
        for i, cl in enumerate(gen.clusters['test']):
            # depth tells how many times the dynamic model will be unrolled
            depth = np.log2(cl).astype(int)
            _, length = gen.compute_length(cl)
            input, _ = gen.get_batch(batch=it, clusters=cl, mode='test')
            # forward DCN
            out = DCN(input, length, depth, it=it, mode='test', dynamic=True)
            Phis, Inputs_N, e, loss, pg_loss, var = out
            cost = utils.cost(input,
                              e.data.cpu().numpy(),
                              n_clusters=args.num_clusters)
            accuracies_test[i].append(cost)
    accuracies_test = [sum(accs) / iterations_te for accs in accuracies_test]
    return accuracies_test
Example #12
def find_optimum(relation, k):

    formulas = get_all_formulas(relation, True)

    logger.info("# formulas: %s", len(formulas))

    logger.debug("possible formulas: %s", relation_rep(formulas))

    all_subsets = list(subsets(formulas, k))

    logger.info("# subsets: %s", len(all_subsets))

    subset_costs = map(lambda x: cost(x, relation), all_subsets)

    ordered = [x for x in sorted(zip(subset_costs, all_subsets), key=lambda x: x[0])]

    best_cost = ordered[0][0]
    best = filter(lambda x: x[0] == best_cost, ordered)

    return best_cost, best
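A minimal, self-contained sketch of the same brute-force idea: enumerate every subset of at most k candidate formulas with itertools.combinations and keep the cheapest. The toy set-cover cost below is illustrative, not the module's cost():

from itertools import combinations

def brute_force_optimum(formulas, relation, k, cost):
    """Try every subset of at most k formulas and return the cheapest one."""
    best_cost, best = float('inf'), None
    for r in range(k + 1):
        for subset in combinations(formulas, r):
            c = cost(set(subset), relation)
            if c < best_cost:
                best_cost, best = c, set(subset)
    return best_cost, best

# toy usage: cover the numbers 1..4 with at most two sets, cost = number left uncovered
universe = {1, 2, 3, 4}
candidates = [frozenset({1, 2}), frozenset({3}), frozenset({3, 4})]
uncovered = lambda subset, rel: len(rel - set().union(*subset)) if subset else len(rel)
print(brute_force_optimum(candidates, universe, 2, uncovered))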
Example #13
def find_optimum(relation, k):
    """ Finds the optimum set of formulas and its cost.
    This method generates all formulas, all subsets of formulas and
    then calculates the cost for every one of them. This can be very slow."""

    formulas = get_all_formulas(relation, True)

    logger.info('# formulas: %s', len(formulas))

    logger.debug('possible formulas: %s', relation_rep(formulas))

    all_subsets = list(subsets(formulas, k))

    logger.info('# subsets: %s', len(all_subsets))

    subset_costs = map(lambda x: cost(x, relation), all_subsets)

    ordered = [x for x in sorted(zip(subset_costs, all_subsets), key=lambda x: x[0])]

    best_cost = ordered[0][0]
    best = filter(lambda x: x[0] == best_cost, ordered)

    return best_cost, best
Example #14
            y = trainLabels[:, batch_indices]

            for l in range(1, L):
                a[l + 1], z[l + 1] = fc(w[l], a[l])

            delta[L] = (a[L] - y) * (a[L] * (1 - a[L]))
            print(delta[L])

            for l in range(L - 1, 1, -1):
                delta[l] = bc(w[l], z[l], delta[l + 1])

            for l in range(1, L):
                grad_w = np.dot(delta[l + 1], a[l].T)
                w[l] = w[l] - alpha * grad_w

            J.append(cost(a[L], y) / mini_batch)
            Acc.append(accuracy(a[L], y))

        a[1] = X_test
        y = testLabels

        for l in range(1, L):
            a[l + 1], z[l + 1] = fc(w[l], a[l])

        print(epoch_num, "training acc:", Acc[-1], 'test acc:',
              accuracy(a[L], y))

    plt.figure()
    plt.plot(J)
    plt.savefig("J.png")
    plt.close()
Example #15
        print("step %d, lr %.4f, training accuracy %g" %
              (i + 1, sess.run(learning_rate), train_accuracy))

        ratio_w, sp = comp(S_vars)
        _sp = sess.run(sp)

        print("loss: %.4f sp: %0.4f %0.4f %0.4f %0.4f :: using param : %.4f" %
              (tr_loss, _sp[0], _sp[1], _sp[2], _sp[3], sess.run(ratio_w)))

    # Training
    opt.run(feed_dict={X: batch[0], Y: batch[1], keep_prob: 0.5})
    if FLAGS.cges:
        _ = sess.run(cges_op_list)

    # Testing
    if (i + 1) % FLAGS.test_iter == 0:
        test_acc = sess.run(accuracy,
                            feed_dict={
                                X: mnist.test.images,
                                Y: mnist.test.labels,
                                keep_prob: 1.0
                            })
        print("test accuracy %0.4f" % test_acc)

        # Computing FLOP
        flop = cost(_sp)
        print("FLOP : %.4f" % flop)
        if FLAGS.cges:
            print('CGES, lambda : %f, mu : %.2f, chvar : %.2f' %
                  (lamb, mu, chvar))
Example #16
    def test_cost(self):
        self.assertEqual(cost(formulas=read_sample("x, y\n1\n, 4"), relation=read_sample("x, y\n1, 2\n1, 4\n1, 4")), 3)

        self.assertEqual(cost(formulas=read_sample("x\n{1}\n{4}"), relation=read_sample("x\n{1 2}\n{1 4}\n{1 4}")), 3)
Example #17
def translatefile(fname, outfname, source, target, format="conll"):
    """
    Given a filename, an output filename, and source and target languages, this
    assembles sentences from the sixth tab-separated column (index 5) of each row
    in fname, translates them from source to target, and writes the result to
    outfname. Language codes are Google two-letter codes (en, uz, tr, de, etc.)
    """

    outlines = []
    service = build('translate', 'v2', developerKey=API_KEY)

    # html.unescape() is used below (HTMLParser().unescape() was removed in Python 3.9)

    if format == "conll":
        lines = utils.readconll(fname)
    elif format == "plaintext":
        lines = utils.readplaintext(fname)
    else:
        print("Format not known: " + format)
        exit()

    memo = shelve.open("shelves/sents-" + source + "-" + target + ".shelf")

    sents = []
    sent = ""

    # gather all sentences
    for line in lines:
        sline = line.split("\t")
        if len(sline) > 5:
            srcword = str(sline[5]).strip()
            sep = " "
            #if srcword in ["'s","n't","'ve"]:
            #    sep = ""
            sent += sep + srcword

        else:
            sent = sent.strip()
            sents.append(sent)
            sent = ""

    if len(sent) > 0:
        sents.append(sent)

    chars = 0
    trans = []
    for sent in sents:
        if sent not in memo:
            trans.append(sent)
            chars += len(sent)

    # calculate the cost and fail if user refuses
    utils.cost(chars)

    outsents = []
    # translate the untranslated sentences in batches.
    batch_size = 20
    for i in range(0, len(trans), batch_size):
        isents = trans[i:i + batch_size]
        print("size of request:", len(isents))
        try:
            response = service.translations().list(source=source,
                                                   target=target,
                                                   q=isents).execute()
            if len(response["translations"]) > 0:
                translations = response["translations"]
                for w, t in zip(isents, translations):
                    tsent = t["translatedText"]
                    memo[w] = tsent
            else:
                print("No translations...")

        except Exception as e:
            print("Whoops... exception")
            print(e)

    outlines = []

    # these will be written to a file for fast_align to use.
    parlines = []

    for sent in sents:
        outsent = memo[sent]

        # fix outsent?
        # try some simple tokenization.
        outsent = html.unescape(outsent)

        tokens = []
        for word in outsent.split():
            while len(word) > 0 and word[0] in string.punctuation:
                tokens.append(word[0])
                word = word[1:]

            if len(word) == 0:
                continue

            after = []
            while len(word) > 0 and word[-1] in string.punctuation:
                after.insert(0, word[-1])
                word = word[:-1]

            tokens.append(word)
            tokens.extend(after)

        outsent = " ".join(tokens)

        parlines.append(sent + " ||| " + outsent + "\n")

        #out.write(line);
        ssent = outsent.split()
        for w in ssent:
            w = html.unescape(w)

            if w.endswith("."):
                outlines.append("O\t0\t0\tx\tx\t" + w[:-1] + "\tx\tx\t0\n")
                outlines.append("O\t0\t0\tx\tx\t.\tx\tx\t0\n")
            else:
                outlines.append("O\t0\t0\tx\tx\t" + w + "\tx\tx\t0\n")

        outlines.append("\n")

    with codecs.open("text.en-" + target, "w", "utf8") as out:
        print("Writing to: text.en-" + target)
        for line in parlines:
            out.write(line)

    print("Writing to:", outfname)
    if format == "conll":
        utils.writeconll(outfname, outlines)
    elif format == "plaintext":
        utils.writeplaintext(outfname, outlines)
    else:
        print("Unknown format: " + format)

    memo.close()
Example #18
def run(episode, learning_rate=0.01, reward_decay=0.9, e_greedy=0.9):

    print('------------------Environment------------------')
    print('  length_range:\t\t', params.length_range)
    print('  priority_range:\t', params.priority_range)
    print('  sensors_amount:\t', params.sensors_amount)
    print('  s:\t\t\t', params.s)
    print('  v:\t\t\t', params.v)
    print('  period:\t\t', params.period)
    print('  t_limit:\t\t', params.t_limit)
    print('  max_time:\t\t', params.max_time)
    print('  Random seed:\t\t', params.seed)
    print('--------------------Method---------------------')
    print('  algorithm:\t\tQL')
    print('  episode:\t\t', episode)
    print('  learning_rate:\t', learning_rate)
    print('  reward_decay:\t\t', reward_decay)
    print('  e_greedy:\t\t', e_greedy)
    print('-----------------------------------------------')

    RL = QTable(actions=list(range(params.sensors_amount)),
                learning_rate=learning_rate,
                reward_decay=reward_decay,
                e_greedy=e_greedy)

    costs = []
    best_uav, best_result, best_cost = None, None, float('inf')
    for _ in tqdm(range(episode)):
        # initial observation
        observation = str(-1)
        sensors, uav = generateMap()
        np.random.seed()

        previous_cost = cost(uav, sensors)

        while True:
            # RL choose action based on observation
            action = RL.choose_action(observation)

            # RL take action and get next observation and reward
            done = uav.fly_to(sensors[action]) is False
            _cost = cost(uav, sensors)
            _observation = (str(action) + '_' + observation)
            reward = (previous_cost - _cost) * 100
            previous_cost = _cost

            # RL learn from this transition
            RL.learn(observation, action, reward, _observation)

            # swap observation
            observation = _observation

            # break while loop when end of this episode
            if done:
                costs.append(_cost)
                if _cost < best_cost:
                    best_result = cost(uav, sensors, details=True)
                    best_cost = _cost
                    best_uav = UAV(uav)
                break
        del uav

    # output results
    print('Max time', params.max_time, 'Final time:', best_uav.records[-1][0])
    print('Best cost:', best_cost)

    # print('Q_table:\n', RL.q_table)

    # x, y = list(range(episode)), costs
    # plt.plot(x, y, color='red')
    # plt.show()

    # draw(best_uav, sensors, details=True)
    draw(best_uav, sensors, details=False)

    return best_result
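For reference, a minimal, self-contained sketch of the tabular Q-learning update that a QTable class typically performs; the defaultdict table and the toy transition are assumptions, not the repository's implementation:

from collections import defaultdict

learning_rate, reward_decay = 0.01, 0.9
q_table = defaultdict(float)               # (state, action) -> estimated value

def learn(state, action, reward, next_state, actions):
    # Q(s,a) += lr * (r + gamma * max_a' Q(s',a') - Q(s,a))
    best_next = max(q_table[(next_state, a)] for a in actions)
    td_target = reward + reward_decay * best_next
    q_table[(state, action)] += learning_rate * (td_target - q_table[(state, action)])

# toy transition: from state '-1' take action 2, observe reward 5, land in state '2_-1'
learn('-1', 2, 5.0, '2_-1', actions=range(3))
print(q_table[('-1', 2)])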
Example #19
def run():
    """Greedy Algorithm
    A greedy algorithm is an algorithmic paradigm that follows the problem
    solving heuristic of making the locally optimal choice at each stage with the
    intent of finding a global optimum.
    """

    print('------------------Environment------------------')
    print('  length_range:\t\t', params.length_range)
    print('  priority_range:\t', params.priority_range)
    print('  sensors_amount:\t', params.sensors_amount)
    print('  s:\t\t\t', params.s)
    print('  v:\t\t\t', params.v)
    print('  period:\t\t', params.period)
    print('  t_limit:\t\t', params.t_limit)
    print('  max_time:\t\t', params.max_time)
    print('  Random seed:\t\t', params.seed)
    print('--------------------Method---------------------')
    print('  algorithm:\t\tGreedy')
    print('-----------------------------------------------')

    sensors, uav = generateMap()
    done, target_sensor_id = False, None
    c = float('inf')
    while not done:
        for sensor in sensors:
            _uav = UAV(uav)
            done = _uav.fly_to(sensor) is False
            if done:
                break

            _c = 0
            for _sensor in sensors:
                epsilon = set()
                for record in _sensor.records:
                    epsilon.add(record // (params.period * _sensor.p))
                epsilon = params.max_time / (params.period *
                                             _sensor.p) - len(epsilon)
                if epsilon != 0:
                    _c += epsilon * 1 / _sensor.p

            # _c = cost(uav, sensors)

            if _c < c:
                c = _c
                target_sensor_id = sensors.index(sensor)

            try:
                sensor.records.pop()
            except IndexError:
                # records may be empty if the trial flight did not record a visit
                pass

            del _uav

        uav.fly_to(sensors[target_sensor_id])
    best_cost = cost(uav, sensors)
    # output results
    print('Max time', params.max_time, 'Final time:', uav.records[-1][0])
    print('Best cost:', best_cost)

    # draw(uav, sensors, details=True)
    draw(uav, sensors, details=False)

    return cost(uav, sensors, details=True)
Example #20
def switch_view(request, panel_id):
    data_dict = {}
    user = request.user
    userProfile = UserProfile.objects.get(user=user)
    panel_id = int(panel_id)
    panel = Panel.objects.get(pk=panel_id)
    session_id = panel.session
    group_id = panel.group.id
    
    
    group_name = panel.group.name
    panel_name = panel.name
    
    game = panel.game
    count = 0
    for tmp in panel.progress:
        if tmp == '1':
            count += 1
    cost = utils.cost(game.switch_cost)
    if count < cost:
        #cannot switch
        log = Log()
        log.game = game
        log.group = panel.group
        log.panel = panel
        log.user_profile = userProfile
        log.action = '\"Switch failed, Not enough credit\"'
        log.action_time = datetime.now()
        log.save()
        data_dict['panel_id'] = panel_id 
        nextUrl = reverse('mine.views.mine_field_view', args=(panel_id,))
        return HttpResponseRedirect(nextUrl)
    else:
        log = Log()
        log.game = game
        log.group = panel.group
        log.panel = panel
        log.user_profile = userProfile
        log.action = '\"Go to Switch page, cost %s\"' % cost
        log.action_time = datetime.now()
        log.save()
        
    # deduct the switch cost by clearing 'cost' completed cells from the start of progress
    index = 0
    for s in panel.progress:
        if cost == 0:
            break
        if s == '1':
            cost -= 1
        index += 1
        
    panel.progress = '0' * index + panel.progress[index:]
    panel.save()
    
    panels = Panel.objects.filter(group__id = group_id, session = session_id)
    panel_id_status_dict={}
    for panel in panels:
        progress = panel.progress
        count = 0
        for s in progress:
            if s == '1':
                count +=1
        
        panel_id_status_dict[panel.id] = {'current': count, 'total': len(progress), 'panel_name': panel.name}
    panel_id_status_json = json.dumps(panel_id_status_dict)
    data_dict['panel_id_status_json'] = panel_id_status_json
    data_dict['panel_id'] = panel_id
    
    data_dict['group_name'] = group_name
    data_dict['panel_name'] = panel_name
    
    return render(request, 'switch.html', data_dict)
Example #21
def find_incremental(relation, k):
    summary = []

    all_cells = []
    for c in get_cells(relation):
        all_cells.append((potential({c}, set(), relation), c))

    all_cells = [x for x in all_cells if x[0] > 0]
    all_cells.sort()
    all_cells.reverse()

    summary = set()
    best_cost = float("inf")

    while True:
        improved_summary = None
        improved_cost = best_cost

        for i, (p, c) in enumerate(all_cells):
            # potential check
            d = best_cost - improved_cost
            if p < d:
                # can't get better any more so let's abort
                break

            is_better = False

            # try to add new formula
            if len(summary) < k:
                s = summary | {frozenset({c})}
                co = cost(s, relation)
                if co < improved_cost:
                    improved_summary = s
                    improved_cost = co
                    is_better = True

            # try to add cell to existing formula
            for f in summary:
                s = summary - {f}
                s.add(f | {c})
                co = cost(s, relation)
                if co < improved_cost:
                    improved_summary = s
                    improved_cost = co
                    is_better = True

            # update potential
            if is_better and best_cost != float("inf"):
                n = best_cost - improved_cost

                if n != potential({c}, summary, relation):
                    print(best_cost, improved_cost, n, potential({c}, summary, relation))
                    print(relation_rep(summary))
                    print(c)
                    assert False
            else:
                n = potential({c}, summary, relation)
            print "update from {} to {}".format(p, n)

            all_cells[i] = (n, c)

        # nothing to improve, stop
        if improved_summary is None:
            break

        summary = improved_summary
        best_cost = improved_cost

        # resort cells
        all_cells = [x for x in all_cells if x[0] > 0]
        all_cells.sort()
        all_cells.reverse()

    return best_cost, summary
Example #22
seed(1)


# initialise the GA parameters
gaParam = {'popSize' : 30, 'noGen' : 20, 'pc' : 0.8, 'pm' : 0.1}
# problem parameters

# problParam = {'min' : MIN, 'max' : MAX, 'function' : fcEval, 'noDim' : noDim, 'noBits' : 8}

problParam = readNet("medium.txt")
problParam['noDim'] = problParam['noNodes']
print(problParam['noNodes'])

#
calcul = lambda x : cost(x,problParam)
problParam['function']  = calcul


# store the best/average solution of each iteration (for a final plot used to analyse the GA's convergence)


ga = GA(gaParam, problParam)
ga.initialisation()
ga.evaluation()

minim = float("inf")

for g in range(gaParam['noGen']):
    #logic alg
    # ga.oneGeneration()
Example #23
def run(episode,
        learning_rate=0.01,
        reward_decay=0.9,
        e_greedy=0.9,
        replace_target_iter=200,
        memory_size=5000):

    print('------------------Environment------------------')
    print('  length_range:\t\t', params.length_range)
    print('  priority_range:\t', params.priority_range)
    print('  sensors_amount:\t', params.sensors_amount)
    print('  s:\t\t\t', params.s)
    print('  v:\t\t\t', params.v)
    print('  period:\t\t', params.period)
    print('  t_limit:\t\t', params.t_limit)
    print('  max_time:\t\t', params.max_time)
    print('  Random seed:\t\t', params.seed)
    print('--------------------Method---------------------')
    print('  algorithm:\t\tDQN')
    print('  episode:\t\t', episode)
    print('  learning_rate:\t', learning_rate)
    print('  reward_decay:\t\t', reward_decay)
    print('  e_greedy:\t\t', e_greedy)
    print('  replace_target_iter:\t', replace_target_iter)
    print('  memory_size:\t\t', memory_size)
    print('-----------------------------------------------')

    RL = DeepQNetwork(
        params.sensors_amount,
        params.sensors_amount,
        learning_rate=learning_rate,
        reward_decay=reward_decay,
        e_greedy=e_greedy,
        replace_target_iter=replace_target_iter,
        memory_size=memory_size,
        # output_graph=True
    )

    costs = []
    best_uav, best_result, best_cost = None, None, float('inf')
    step = 0
    for _ in tqdm(range(episode)):
        # initial observation
        sensors, uav = generateMap()
        observation = observe(uav, sensors)
        np.random.seed()

        previous_cost = cost(uav, sensors)

        while True:
            # RL choose action based on observation
            action = RL.choose_action(observation)

            # RL take action and get next observation and reward
            done = uav.fly_to(sensors[action]) is False
            _cost = cost(uav, sensors)
            _observation = observe(uav, sensors)
            reward = (previous_cost - _cost) * 100
            previous_cost = _cost

            # RL learn from this transition
            RL.store_transition(observation, action, reward, _observation)

            if (step > episode / 5) and (step % 5 == 0):
                RL.learn()

            # swap observation
            observation = _observation

            # break while loop when end of this episode
            if done:
                costs.append(_cost)
                if _cost <= best_cost:
                    best_result = cost(uav, sensors, details=True)
                    best_cost = _cost
                    best_uav = UAV(uav)
                break
            step += 1

    # output results
    print('Max time', params.max_time, 'Final time:', best_uav.records[-1][0])
    print('Best cost:', best_cost)

    # RL.plot_cost()

    # # show costs plot
    # x, y = list(range(episode)), costs
    # plt.plot(x, y, color='red')
    # plt.show()

    with open('./out/DQN_{:%m-%d-%H-%M-%S}.json'.format(params.time),
              "w+") as f:
        f.write(json.dumps(best_result))

    # draw(best_uav, sensors, details=True)
    draw(best_uav, sensors, details=False)

    return best_result
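A minimal sketch of the experience-replay pattern behind store_transition()/learn() in the DQN above: transitions go into a bounded buffer and training samples random mini-batches from it. The buffer size and batch size are illustrative, not the repository's values:

import random
from collections import deque

memory = deque(maxlen=5000)                 # bounded replay memory

def store_transition(observation, action, reward, next_observation):
    memory.append((observation, action, reward, next_observation))

def sample_batch(batch_size=32):
    # sample without replacement; copy to a list for random.sample
    return random.sample(list(memory), min(batch_size, len(memory)))

store_transition([0.0, 1.0], 1, 0.5, [0.5, 1.0])
print(sample_batch(1))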