Example #1
    def index(self, query=None):
        self.src = imagesearch.Searcher("test.db", self.voc)

        html = self.header
        html += """
      <br />
      Click an image to search. <a href='?query='>Random selection</a> of images.
      <br /><br />
      """
        if query:
            # query the database and get the top-ranked images
            res = self.src.query(query)[: self.maxres]
            for dist, ndx in res:
                imname = self.src.get_filename(ndx)
                html += "<a href='?query=" + imname + "'>"
                html += "<img src='" + imname + "' width='100' />"
                html += "</a>"
        else:
            # no query given, so show a random selection
            random.shuffle(self.ndx)
            for i in self.ndx[: self.maxres]:
                imname = self.imlist[i]
                html += "<a href='?query=" + imname + "'>"
                html += "<img src='" + imname + "' width='100' />"
                html += "</a>"

        html += self.footer
        return html
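Examples #1 and #2 are CherryPy handler methods; the class that owns them is not shown. As a hedged sketch (the class name, attribute setup, and served page are placeholders, not the original project's code), mounting such a handler looks like this:

import cherrypy


class SearchDemo(object):
    # hypothetical stand-in for the class that owns index() above; the real
    # class would also set up header, footer, voc, maxres, ndx and imlist

    @cherrypy.expose
    def index(self, query=None):
        # the real method builds the page from search results, as shown above
        return "<html><body>stub</body></html>"


if __name__ == '__main__':
    cherrypy.quickstart(SearchDemo())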
Example #2
    def index(self, query=None):
        self.src = imagesearch.Searcher('test.db', self.voc)

        html = self.header
        html += """
      <br />
      Click an image to search. <a href='?query='>Random selection</a> of images.
      <br /><br />
      """
        if query:
            # query the database and get the top-ranked images
            res = self.src.query(query)[:self.maxres]
            for dist, ndx in res:
                imname = self.src.get_filename(ndx)
                html += "<a href='?query=" + imname + "'>"
                html += "<img src='" + imname + "' width='100' />"
                html += "</a>"
        else:
            # no query given, so show a random selection
            random.shuffle(self.ndx)
            for i in self.ndx[:self.maxres]:
                imname = self.imlist[i]
                html += "<a href='?query=" + imname + "'>"
                html += "<img src='" + imname + "' width='100' />"
                html += "</a>"

        html += self.footer
        return html
Example #3
    def _run_one_sim(self, bandit_alg):
        self._round_counter = 0
        random.shuffle(self.arms)
        bandit_alg.initialize(n_arms=self.n_arms)
        best_arm = argmax([arm.expected_reward for arm in self.arms])

        self._simulation_counter += 1

        if self.verbose:
            print('Starting Simulation %i - Best Arm is %i' % (self._simulation_counter, best_arm))

        results = [self._run_one_round(bandit_alg) for _ in range(self.n_rounds)]

        return results, best_arm
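`argmax` is not defined in this snippet. If it is not numpy's, a pure-Python equivalent consistent with how it is called here would be:

def argmax(values):
    # index of the largest element; ties go to the first occurrence
    return max(range(len(values)), key=lambda i: values[i])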
Example #4
    def test_parity(self):
        """
        test parity with random data
        """
        for n_col in range(1,7):
            for n_row in range(1,50):
                A = arange(n_col*n_row)
                random.shuffle(A)
                A = A.reshape((n_row,n_col))

                s_parity = simplex_array_parity(A)

                for n,row in enumerate(A):
                    assert_equal(s_parity[n],relative_parity(row,sorted(row)))
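The test asserts that `simplex_array_parity` matches the parity of the permutation that sorts each row. For reference, a minimal inversion-count version of that idea (the real `relative_parity` lives in the library under test; this assumes distinct row entries, which holds here because the rows come from a shuffled `arange`):

def permutation_parity(row):
    # parity (0 = even, 1 = odd) of the permutation taking sorted(row) to row
    inversions = sum(1 for i in range(len(row))
                     for j in range(i + 1, len(row))
                     if row[i] > row[j])
    return inversions % 2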
Example #5
    def __init__(self, b, g, I):

        #parameters
        self.b = b
        self.g = g
        self.t = 0

        self.N = 10000
        self.I = I
        #self.adjacencyList, self.N = read_network()
        #self.adjacencyList, self.N = read_network2()

        self.graph = igraph.Graph.Watts_Strogatz(1, self.N, nei=100, p=0.1)
        self.graph.simplify()
        self.adjacencyList = self.graph.get_adjlist()

        # going to use this to store the *indices* of agents in each state
        self.sAgentList = []
        self.iAgentList = []
        self.rAgentList = []

        # and here we're going to store the counts of how many agents are in each
        # state @ each time step
        self.sList = []
        self.iList = []
        self.rList = []
        self.newIList = []

        # and we'll use this to keep track of recovery times in the more
        # efficient implementation
        self.recoveryTimesHeap = []
        self.susceptibleTimeHeap = []

        # make a list of agent indices (easy because they're labeled 0-N)
        allAgents = list(range(self.N))  # a list, so it can be shuffled in place

        # shuffle the list so there's no accidental correlation in agent actions
        random.shuffle(allAgents)

        # start with everyone susceptible
        self.sAgentList = copy.copy(allAgents)

        # now infect a few to infect at t = 0
        self.indexCases = []
        for i in range(I):
            indexCase = self.sAgentList[0]
            self.indexCases.append(indexCase)
            self.infectAgent(indexCase)
            self.iAgentList.append(indexCase)
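`infectAgent` is not shown, but the loop above only works if it removes the agent from `sAgentList`, so that `self.sAgentList[0]` yields a fresh index case on every pass. A plausible minimal version under that assumption (the recovery scheduling is a guess at what `recoveryTimesHeap` is for):

import heapq
import random

def infectAgent(self, agent):
    # move agent out of the susceptible pool; return 1 if newly infected
    if agent in self.sAgentList:
        self.sAgentList.remove(agent)
        # schedule a recovery time on the heap kept by __init__
        heapq.heappush(self.recoveryTimesHeap,
                       (self.t + random.expovariate(self.g), agent))
        return 1
    return 0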
Example #6
    def run(self):
        raw_file = open('res_sirs.txt', 'w+')
        #same as while I > 0
        while len(self.iAgentList) > 0:
            tempIAgentList = []
            newI = 0

            for iAgent in self.iAgentList:
                for agent in self.adjacencyList[iAgent]:
                    if agent in self.sAgentList:
                        if random.random() < self.b:
                            newI += self.infectAgent(agent)
                            tempIAgentList.append(agent)

            # then get the list of who is recovering
            recoverList = self.recoverAgents()
            susceptibleList = self.susceptibleAgent()
            # for recoveries
            for recoverAgent in recoverList:
                self.iAgentList.remove(recoverAgent)
                self.rAgentList.append(recoverAgent)
                self.susAgent(recoverAgent)

            for susceptibleAgent in susceptibleList:
                self.rAgentList.remove(susceptibleAgent)
                self.sAgentList.append(susceptibleAgent)

            self.iAgentList.extend(tempIAgentList)

            self.sList.append(len(self.sAgentList))
            self.iList.append(len(self.iAgentList))
            self.rList.append(len(self.rAgentList))
            self.newIList.append(newI)


            self.t += 1

            print('t', self.t, 'numS', len(self.sAgentList), 'numI', len(self.iAgentList), 'numR', len(self.rAgentList))
            raw_file.write(str(self.t) + ' ' + str(len(self.sAgentList)) + ' ' + str(len(self.iAgentList)) +
                           ' ' + str(len(self.rAgentList)) + '\n')

            random.shuffle(self.iAgentList)

        raw_file.close()
        return [self.sList, self.iList, self.rList, self.newIList]
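Shuffling `iAgentList` at the end of each step removes ordering bias in who gets to spread first on the next step. A hypothetical driver (the class name `SIRS` and the parameter values are mine, not the source's):

model = SIRS(b=0.05, g=0.1, I=10)
sList, iList, rList, newIList = model.run()
print('outbreak lasted %d steps, peak infections: %d' % (len(iList), max(iList)))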
Example #7
import random

import scipy as sp  # relies on older scipy re-exporting numpy's zeros/rand/copy


def pattern_vs_shuffled(t, pattern, pp, ps):
    patl = len(pattern)
    seq = sp.zeros((len(t)))
    i = 0
    while (i < (len(t) - patl)):
        r = sp.rand()
        if (r < pp):
            seq[i:i + patl] = seq[i:i + patl] + pattern
            i = i + patl
            continue
        if (r < (pp + ps)):
            # copy and shuffle
            pcop = sp.copy(pattern)
            random.shuffle(pcop)
            seq[i:i + patl] = seq[i:i + patl] + pcop
            i = i + patl
            continue
        i += 1
    return seq
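A quick usage sketch, with arbitrary values: each position gets the intact pattern with probability `pp`, a shuffled copy with probability `ps`, or stays empty:

import numpy as np

t = np.arange(10000)
pattern = np.array([0., 1., 2., 3., 2., 1., 0.])
seq = pattern_vs_shuffled(t, pattern, pp=0.02, ps=0.02)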
Example #8
import random

import networkx as nx
import numpy as np
import scipy as sp
import scipy.spatial  # provides sp.spatial.distance_matrix

# cell_sample() and multi_line_intersect() are project helpers assumed to be
# defined elsewhere in this module.
def randomize_graph(graph, nodePositions, mask, planar=0, iterations=1000):
    """Randomize graph by shuffling node positions and edges or edge capacities only.

    Parameters
    ----------
    graph : original graph
    nodePositions : node positions
    mask : binary array of cellular region of interest
    planar : ignore edge crossings (=0) or favor planar graph by reducing number of edge crossings (=1)
    iterations : number of iterations before returning original graph

    Returns
    -------
    randomizedGraph : randomized graph
    nodePositionsRandom : randomized node positions

    """
    nodeNumber = graph.number_of_nodes()
    edgeNumber = graph.number_of_edges()
    randomizedGraph = nx.empty_graph(nodeNumber, nx.MultiGraph())
    edgeLengths = np.array([property['edist'] for node1, node2, property in graph.edges(data=True)])
    bins = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200, 9999]
    binNumber = len(bins) - 1
    edgeBins = np.zeros(edgeNumber).astype('int')
    for index, (bin1, bin2) in enumerate(zip(bins[:-1], bins[1:])):
        edgesInBin = (edgeLengths >= bin1) * (edgeLengths < bin2)
        edgeBins[edgesInBin] = index
    edgeWeights = np.array([property['weight'] for node1, node2, property in graph.edges(data=True)])
    edgeCapacities = np.array([property['capa'] for node1, node2, property in graph.edges(data=True)])
    redoRandomization = 1
    iterationNumber = 0
    while (redoRandomization == 1 and iterationNumber < iterations):
        iterationNumber += 1
        nodePositionsRandom = cell_sample(mask, nodeNumber)[:, ::-1].astype('int')
        distanceMatrix = sp.spatial.distance_matrix(nodePositionsRandom, nodePositionsRandom)
        edgeBinsRandom = np.zeros((nodeNumber, nodeNumber)).astype('int')
        for index, (bin1, bin2) in enumerate(zip(bins[:-1], bins[1:])):
            edgesInBin = (distanceMatrix >= bin1) * (distanceMatrix < bin2)
            edgeBinsRandom[edgesInBin] = index
        edgeBinsRandom[np.tri(nodeNumber) > 0] = -9999
        redoRandomization = 1 * np.max([(edgeBinsRandom == binIndex).sum() < (edgeBins == binIndex).sum() for binIndex in range(binNumber)])
    if (iterationNumber < iterations):
        sortBins = np.argsort(edgeLengths)[::-1]
        edgeBinsSort = edgeBins[sortBins]
        edgeWeightsSort = edgeWeights[sortBins]
        edgeCapacitiesSort = edgeCapacities[sortBins]
        addedEdges = []
        for edge in range(edgeNumber):
            candidateNodes = np.where(edgeBinsRandom == edgeBinsSort[edge])
            candidateNumber = len(candidateNodes[0])
            edgeCrossings = 9999
            selectedCandidates = random.sample(range(candidateNumber), min(50, candidateNumber))
            for candidate in selectedCandidates:
                node1 = candidateNodes[0][candidate]
                node2 = candidateNodes[1][candidate]
                edgeBetweenNodes = np.array([[nodePositionsRandom[node1][0], nodePositionsRandom[node2][0]], [nodePositionsRandom[node1][1], nodePositionsRandom[node2][1]]]).T
                crossingsOfEdges = planar * multi_line_intersect(np.array(edgeBetweenNodes), np.array(addedEdges)).sum()
                if (crossingsOfEdges < edgeCrossings and edgeBinsRandom[node1, node2] >= 0):
                    edgeCrossings = crossingsOfEdges
                    selectedEdge = edgeBetweenNodes
                    selectedNode1, selectedNode2 = node1, node2
            addedEdges.append(selectedEdge)
            nodeDistanceRandom = distanceMatrix[selectedNode1, selectedNode2]
            filamentLengthRandom = 1.0 * np.ceil(nodeDistanceRandom)
            edgeWeightRandom = edgeWeightsSort[edge]
            edgeCapacityRandom = edgeCapacitiesSort[edge]
            edgeLengthRandom = 1.0 * filamentLengthRandom / edgeWeightRandom
            edgeConnectivityRandom = 0
            edgeJumpRandom = 0
            edgeMultiplicity = 1
            randomizedGraph.add_edge(selectedNode1, selectedNode2, edist=nodeDistanceRandom, fdist=filamentLengthRandom, weight=edgeWeightRandom, capa=edgeCapacityRandom, lgth=edgeLengthRandom, conn=edgeConnectivityRandom, jump=edgeJumpRandom, multi=edgeMultiplicity)
            edgeBinsRandom[selectedNode1, selectedNode2] = -9999
            edgeBinsRandom[selectedNode2, selectedNode1] = -9999
    else:
        edgeProperties = np.array([property for node1, node2, property in graph.edges(data=True)])
        random.shuffle(edgeProperties)
        randomizedGraph = graph.copy()
        for index, (node1, node2, properties) in enumerate(randomizedGraph.edges(data=True)):
            for key in properties.keys():
                properties[key] = edgeProperties[index][key]
        nodePositionsRandom = nodePositions
    return randomizedGraph, nodePositionsRandom
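The fallback branch above (taken when no geometric randomization is found within `iterations`) keeps the topology and only permutes edge attributes. Isolated from the rest of the function, that idea reduces to a sketch like this:

import random

import networkx as nx


def shuffle_edge_attributes(graph):
    # return a copy of graph whose per-edge attribute dicts are randomly permuted
    shuffled = graph.copy()
    attributes = [dict(data) for _, _, data in shuffled.edges(data=True)]
    random.shuffle(attributes)
    for (node1, node2, data), new_data in zip(shuffled.edges(data=True), attributes):
        data.clear()
        data.update(new_data)
    return shuffled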
Example #9
output_dir = path.join(mypath, "splits_csv")
if not path.exists(output_dir):
    os.makedirs(output_dir)

for n in range(ntrials):

    for name, (tk, tv), (dk, dv) in zip(itype_name_l,
                                        target_d.items(),
                                        distractor_d.items()):

        assert tk == dk

        random.shuffle(tv)
        random.shuffle(dv)

        train_pos_l = [(elt, '+1', 'train') for elt in tv[:len(tv) // 2]]
        assert len(train_pos_l) == nsamples
        train_neg_l = [(elt, '-1', 'train') for elt in dv[:len(tv) // 2]]
        assert len(train_neg_l) == nsamples

        test_pos_l = [(elt, '+1', 'test') for elt in tv[len(tv) // 2:]]
        assert len(test_pos_l) == nsamples
        test_neg_l = [(elt, '-1', 'test') for elt in dv[len(tv) // 2:]]
        assert len(test_neg_l) == nsamples
        assert len(test_neg_l) == nsamples


        traintest_l = train_pos_l + train_neg_l \
                      + test_pos_l + test_neg_l
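The shuffle-then-slice pattern here is a common way to build a train/test split. As a generic helper (the name, the 50/50 default, and the optional seed are my additions):

import random


def shuffled_split(items, train_fraction=0.5, seed=None):
    # shuffle a copy of items and cut it into (train, test) lists
    pool = list(items)
    random.Random(seed).shuffle(pool)
    cut = int(len(pool) * train_fraction)
    return pool[:cut], pool[cut:]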
Example #10
from scipy import random
from tables import *
import sys

in_h5 = openFile(sys.argv[1])
out_h5 = openFile(sys.argv[2], "w")

normalized_ratings_table = in_h5.root.training # TODO: change to normalized

training_table = out_h5.createTable("/", "training", normalized_ratings_table.description)
validation_table = out_h5.createTable("/", "validation", normalized_ratings_table.description)

max_user_number = normalized_ratings_table.cols.user[:].max()

for user_number in range(max_user_number + 1):
    this_users_ratings = normalized_ratings_table.readWhere("user==%iL" % user_number)

    random.shuffle(this_users_ratings)

    training_table.append(this_users_ratings[5:])  # original sliced [4:], which put row 4 in both tables
    validation_table.append(this_users_ratings[0:5])

    if user_number % 1000 == 0: print(user_number)



Example #11
from scipy import random
from tables import *
import sys

in_h5 = openFile(sys.argv[1])
out_h5 = openFile(sys.argv[2], "w")

normalized_ratings_table = in_h5.root.training  # TODO: change to normalized

training_table = out_h5.createTable("/", "training",
                                    normalized_ratings_table.description)
validation_table = out_h5.createTable("/", "validation",
                                      normalized_ratings_table.description)

max_user_number = normalized_ratings_table.cols.user[:].max()

for user_number in range(max_user_number + 1):
    this_users_ratings = normalized_ratings_table.readWhere("user==%iL" %
                                                            user_number)

    random.shuffle(this_users_ratings)

    training_table.append(this_users_ratings[5:])  # original sliced [4:], which put row 4 in both tables
    validation_table.append(this_users_ratings[0:5])

    if user_number % 1000 == 0: print(user_number)
Example #12
def main(_):

    # im_feats= tf.placeholder(tf.float32, shape=[None])
    # sent_feats = tf.placeholder(tf.float32, shape=[None])

    # visual = pd.read_csv('E:/UETGen/2/ganTest/featFiles/faceTest_4096.csv', header=None, sep= ',')
    # img_train = visual.loc[0].to_numpy()
    # textual = pd.read_csv('E:/UETGen/2/train_test/voiceTest.csv', header=None, sep=',')
    # voice_train = textual.loc[0].to_numpy()
    # train_label = 0

    im_feat_dim = 4096
    sent_feat_dim = 1024

    train_file = 'E:/saqlain/face_test.csv'
    train_file_voice = 'E:/saqlain/wav_test.csv'

    # test_file = '/home/shah/pycharm-projects/mlp/train_test/faceTest.csv'
    # test_file_voice = '/home/shah/pycharm-projects/mlp/train_test/voiceTest.csv'

    img_train, labels_faces = read_file_org(train_file)
    voice_train, labels_voices = read_file_org(train_file_voice)
    # img_test, test_label = read_file_org_test(test_file)
    # img_test_voice, _ = read_file_org_test(test_file_voice)

    img_train, voice_train, pair_labels = create_pairs(img_train, voice_train,
                                                       labels_faces,
                                                       labels_voices)

    le = preprocessing.LabelEncoder()
    le.fit(pair_labels)
    train_label = le.transform(pair_labels)

    print("Train file length", len(img_train))
    #print("Test file length", len(img_test))
    print("Train label length", len(train_label))
    #print("Test label length", len(test_label))

    # mean_data_img_train = np.mean(img_train, axis=0)
    # mean_data_voice_train = np.mean(voice_train, axis=0)

    combined = list(zip(img_train, voice_train, train_label))
    random.shuffle(combined)
    img_train[:], voice_train[:], train_label[:] = zip(*combined)

    # Load data.
    steps_per_epoch = len(voice_train) // FLAGS.batch_size
    # num_steps = steps_per_epoch * FLAGS.max_num_epoch

    # im_feat_plh = tf.placeholder(tf.float32, shape=[None])
    # sent_feat_plh = tf.placeholder(tf.float32, shape=[None])
    # Setup placeholders for input variables.
    im_feat_plh = tf.placeholder(tf.float32,
                                 shape=[FLAGS.batch_size, im_feat_dim])
    sent_feat_plh = tf.placeholder(tf.float32,
                                   shape=[FLAGS.batch_size, sent_feat_dim])
    label_plh = tf.placeholder(tf.int64, shape=(None), name='labels')
    train_phase_plh = tf.placeholder(tf.bool)

    #exit()

    # Setup training operation.
    softmax_loss, central_loss, loss = setup_train_model(
        im_feat_plh, sent_feat_plh, train_phase_plh, label_plh, FLAGS)
    print('')
    # Setup optimizer.
    global_step = tf.Variable(0, trainable=False)
    init_learning_rate = 0.0001
    learning_rate = tf.train.exponential_decay(init_learning_rate,
                                               global_step,
                                               steps_per_epoch,
                                               0.769,
                                               staircase=True)
    optim = tf.train.AdamOptimizer(learning_rate)

    # gradients, variables = zip(*optim.compute_gradients(loss))
    # gradients, _ = tf.clip_by_global_norm(gradients, 5.0)
    # optim = optim.apply_gradients(zip(gradients, variables))

    update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
    with tf.control_dependencies(update_ops):
        train_step = optim.minimize(loss, global_step=global_step)

    # Setup model saver.
    saver = tf.train.Saver(save_relative_paths=False)

    num_train_samples = len(img_train)
    num_of_batches = (num_train_samples // FLAGS.batch_size)

    # ax = plt.subplot()
    totl_loss = []
    soft_loss = []
    cent_loss = []
    # `config` (a tf.ConfigProto) is assumed to be defined elsewhere in the script
    with tf.Session(config=config) as sess:
        sess.run(tf.global_variables_initializer())

        for i in range(5):
            for idx in range(num_of_batches):
                im_feats, batch_labels = get_batch(idx, FLAGS.batch_size,
                                                   train_label, img_train)
                sent_feats, _ = get_batch(idx, FLAGS.batch_size, train_label,
                                          voice_train)

                feed_dict = {
                    im_feat_plh: im_feats,
                    sent_feat_plh: sent_feats,
                    label_plh: batch_labels,
                    train_phase_plh: True,
                }

                [_, soft, cent, loss_val
                 ] = sess.run([train_step, softmax_loss, central_loss, loss],
                              feed_dict=feed_dict)

                soft_loss.append(soft)
                cent_loss.append(cent)
                totl_loss.append(loss_val)

                print('Epoch: %d Step: %d Loss: %f' % (i, idx, loss_val))
            print('Saving checkpoint at step %d' % i)
            saver.save(sess, FLAGS.save_dir, global_step=global_step)
    plt.figure(0)
    plt.title('Total Loss')
    plt.plot(totl_loss)
    print("Minimum total loss {}".format(min(totl_loss)))
    plt.figure(1)
    plt.title('Softmax Loss')
    plt.plot(soft_loss)
    print("Minimum soft loss {}".format(min(soft_loss)))
    plt.figure(2)
    plt.title('Central Loss')
    plt.plot(cent_loss)
    print("Minimum central loss {}".format(min(cent_loss)))
Example #13
	def __qualification_sentences(self, sentences, n):
		'''
		Pick 'n' random sentences for qualification test
		'''
		random.shuffle(sentences)
		return sentences[0:n]
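Note that `random.shuffle` mutates the caller's list. When the input should stay untouched, `random.sample` does the same job in one call:

import random


def qualification_sentences(sentences, n):
    # pick n random sentences without reordering the original list
    return random.sample(sentences, n)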
Example #14
        centroids = np.zeros([2, nPoints], dtype=np.float32)

        # seed generators and optimize centroids
        generators = nPointSeed(gray_img, nPoints)
        centroids = optimize(generators, centroids, nPoints, imshape, radius)

        # gimme some numbers
        np.set_printoptions(precision=3)
        print "{0:.3f}".format(mean(gray_img)), col, nPoints, "stippled!"

        # append the result to the list of points:
        for cen in centroids.T:
            points.append(Point(cen, col))

    # randomise order of points to plot
    random.shuffle(points)

    # Make a pdf surface
    surf = cairo.PDFSurface(
        open(filename.split(".")[0] + str(K) + 'point.pdf', "wb"), img.shape[1],
        img.shape[0])
    # Make a svg surface
    # surf =  cairo.SVGSurface(open("test.svg", "w"), img.shape[1], img.shape[0])

    # Get a context object from Cairo
    ctx = cairo.Context(surf)
    # Dark blue background covering the full image (shape[0] is the height;
    # the original used shape[2], the channel count)
    ctx.rectangle(0.0, 0.0, img.shape[1], img.shape[0])
    ctx.set_source_rgb(0.0, 0.0, 0.5)
    ctx.fill()
    # Add circles
Example #15
    def run(self):
        raw_file = open("res_siiis.txt", "w+")
        while len(self.i1AgentList) > 0 or len(self.i2AgentList) > 0 or len(self.i3AgentList) > 0:
            tempI1AgentList = []
            newI1 = 0

            tempI2AgentList = []
            newI2 = 0

            tempI3AgentList = []
            newI3 = 0

            for iAgent in self.i1AgentList:
                for agent in self.adjacencyList[iAgent]:
                    if agent in self.sAgentList:
                        if random.random() < self.b[0]:
                            newI1 += self.infectAgent(agent, 1)
                            tempI1AgentList.append(agent)

            susceptibleList = self.susceptibleAgent(1)
            for susceptibleAgent in susceptibleList:
                self.i1AgentList.remove(susceptibleAgent)
                self.sAgentList.append(susceptibleAgent)

            self.i1AgentList.extend(tempI1AgentList)

            for iAgent in self.i2AgentList:
                for agent in self.adjacencyList[iAgent]:
                    if agent in self.sAgentList:
                        if random.random() < self.b[1]:
                            newI2 += self.infectAgent(agent, 2)
                            tempI2AgentList.append(agent)

            susceptibleList = self.susceptibleAgent(2)
            for susceptibleAgent in susceptibleList:
                self.i2AgentList.remove(susceptibleAgent)
                self.sAgentList.append(susceptibleAgent)

            self.i2AgentList.extend(tempI2AgentList)

            for iAgent in self.i3AgentList:
                for agent in self.adjacencyList[iAgent]:
                    if agent in self.sAgentList:
                        if random.random() < self.b[2]:
                            newI3 += self.infectAgent(agent, 3)
                            tempI3AgentList.append(agent)

            susceptibleList = self.susceptibleAgent(3)
            for susceptibleAgent in susceptibleList:
                self.i3AgentList.remove(susceptibleAgent)
                self.sAgentList.append(susceptibleAgent)

            self.i3AgentList.extend(tempI3AgentList)

            self.sList.append(len(self.sAgentList))
            self.i1List.append(len(self.i1AgentList))
            self.i2List.append(len(self.i2AgentList))
            self.i3List.append(len(self.i3AgentList))
            self.newI1List.append(newI1)
            self.newI2List.append(newI2)
            self.newI3List.append(newI3)

            self.t += 1

            print(
                "t",
                self.t,
                "numS",
                len(self.sAgentList),
                "numI1",
                len(self.i1AgentList),
                "numI2",
                len(self.i2AgentList),
                "numI3",
                len(self.i3AgentList),
            )
            raw_file.write(
                str(self.t)
                + " "
                + str(len(self.sAgentList))
                + " "
                + str(len(self.i1AgentList))
                + " "
                + str(len(self.i2AgentList))
                + " "
                + str(len(self.i3AgentList))
                + "\n"
            )

            random.shuffle(self.i1AgentList)
            random.shuffle(self.i2AgentList)
            random.shuffle(self.i3AgentList)
Example #16
    def __qualification_sentences(self, sentences, n):
        '''
        Pick 'n' random sentences for qualification test
        '''
        random.shuffle(sentences)
        return sentences[0:n]