Example #1
def copy_file_from_remote(file_path, main_hostname, username):
    """
    Copy file or folder from remote to local machine using scp. 
    This uses Paramiko's scp module and assumes that ssh keys between the two machines have been setup.
    @file_path: This should lead with the generic '~/' user sign. This makes the home path compatible
                between different machines
    @main_hostname: The name or ... of the remote machine
    @username: The username used for login to remote machine
    """
    raw_file_path = file_path
    local_file_path = os.path.expanduser(file_path)
    # if not os.path.isfile(local_file_path+filename) and main_hostname != 'localhost':
    local_parent_dir, filename = ntpath.split(local_file_path)
    if not os.path.isdir(local_parent_dir):
        print('Path to parent directory %s does not exist, creating dir.' %
              local_parent_dir)
        os.makedirs(local_parent_dir)
    print('Copying data from %s via scp...' % main_hostname)
    tic()
    # copy the data folder (and contents) from the remote machine to the parent directory on the local machine
    scp_get(username,
            main_hostname,
            raw_file_path,
            local_parent_dir,
            recursive=True)
    print('Completed copying data from main host.')
    toc()
    return
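The scp_get helper called above is not shown on this page. A minimal sketch of what it might look like, assuming the third-party paramiko and scp packages and the pre-configured SSH keys the docstring mentions (a sketch, not the original implementation):

import paramiko
from scp import SCPClient  # third-party 'scp' package


def scp_get(username, hostname, remote_path, local_path, recursive=False):
    """Hypothetical helper: copy remote_path on hostname to local_path via scp."""
    ssh = paramiko.SSHClient()
    ssh.load_system_host_keys()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect(hostname, username=username)  # key-based auth assumed
    try:
        with SCPClient(ssh.get_transport()) as scp:
            scp.get(remote_path, local_path, recursive=recursive)
    finally:
        ssh.close()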
Example #2
def fit_model(phi, eps=0.):
    assert phi.ndim == 2, 'data has to be two-dimensional'
    assert phi.shape[1] > phi.shape[0], 'data samples have to be in columns'
    d, nsamples = phi.shape
    nij = d**2 - d  # number of coupling terms

    adata, arow, acol, b = fill_model_matrix(phi)
    a = sparse.coo_matrix((adata, (arow, acol)), (nij, nij))

    tic('matrix inversion')
    if eps > 0:
        a2 = np.dot(a.T,
                    a) + eps * nsamples * sparse.eye(nij, nij, format='coo')
        b2 = np.dot(a.todense().T, np.atleast_2d(b).T)
        # NOTE: this sparse multiplication is buggy; I can't get the shape of b2 to be (b.size,)
        b3 = b2.copy().flatten().T
        b3.shape = (b3.size, )
        k_vec = dsolve.spsolve(a2.tocsr(), b3)
        k_mat = np.zeros((d, d), complex)
        k_mat.T[np.where(np.diag(np.ones(d)) - 1)] = k_vec.ravel()
    else:
        k_vec = dsolve.spsolve(a.tocsr(), b)
        k_mat = np.zeros((d, d), complex)
        k_mat.T[np.where(np.diag(np.ones(d)) - 1)] = k_vec
    toc('matrix inversion')
    return k_mat
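Nearly every example on this page times a code section with tic()/toc() helpers in the style of MATLAB's stopwatch; the exact implementation differs per project (some print labels, some return timestamps). A minimal label-keyed sketch of one plausible variant, with all names assumed:

import time

_timers = {}


def tic(label=''):
    """Start (or restart) a stopwatch for this label and return the start time."""
    _timers[label] = time.time()
    return _timers[label]


def toc(label='', start=None):
    """Print and return the seconds elapsed since tic(label) or an explicit start."""
    t0 = start if start is not None else _timers.get(label, time.time())
    elapsed = time.time() - t0
    print('%s: %.3f s' % (label or 'elapsed', elapsed))
    return elapsed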
Example #3
def extract(dirs, outdir, nprocs, nsegments):
    """ go through each data directory and extract features
        
        Args:
        dirs: list of directories to process
        outdir: output directory
        nprocs: number of processes used
        nsegments: number of segments each EEG signal is separated into.
        
        Return:
        None
    """
    for data_dir in dirs:
        utils.mkdir_p(os.path.join(outdir, os.path.basename(data_dir)))
        matfiles = utils.list_matfiles(data_dir)
        worker_args = [{
            "matfile": matfile,
            "csvfile": os.path.join(
                outdir, os.path.basename(matfile).replace(".mat", ".csv")),
            "total": len(matfiles),
            "process_index": i,
            "nsegments": nsegments,
        } for i, matfile in enumerate(matfiles)]
        utils.tic()
        pool = multiprocessing.Pool(processes=nprocs)
        pool.map(worker, worker_args)
        pool.close()
        pool.join()
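The worker function and the utils helpers are defined elsewhere in that project. A self-contained sketch of a compatible worker, under the assumption that each .mat file holds one EEG array and each segment is reduced to simple summary statistics (every name here is hypothetical):

import csv

import numpy as np
import scipy.io


def worker(args):
    """Hypothetical worker: split one EEG signal and write one feature row per segment."""
    mat = scipy.io.loadmat(args["matfile"])
    key = sorted(k for k in mat if not k.startswith("__"))[0]  # first data variable
    signal = np.atleast_2d(mat[key])
    with open(args["csvfile"], "w", newline="") as f:
        writer = csv.writer(f)
        for seg in np.array_split(signal, args["nsegments"], axis=-1):
            writer.writerow([seg.mean(), seg.std(), seg.min(), seg.max()])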
Example #4
def fit_model(phi, eps=0.):
    assert phi.ndim == 2, 'data has to be two-dimensional'
    assert phi.shape[1] > phi.shape[0], 'data samples have to be in columns'    
    d, nsamples = phi.shape
    nij = d**2-d # number of coupling terms

    adata, arow, acol, b = fill_model_matrix(phi)
    a = sparse.coo_matrix((adata,(arow,acol)), (nij,nij))

    tic('matrix inversion')
    if eps > 0:
        a2 = np.dot(a.T,a) + eps*nsamples*sparse.eye(nij,nij,format='coo')
        b2 = np.dot(a.todense().T,np.atleast_2d(b).T)
        # NOTE: this sparse multiplication is buggy; I can't get the shape of b2 to be (b.size,)
        b3 = b2.copy().flatten().T
        b3.shape = (b3.size,)
        k_vec = dsolve.spsolve(a2.tocsr(),b3)
        k_mat = np.zeros((d,d),complex)
        k_mat.T[np.where(np.diag(np.ones(d))-1)] = k_vec.ravel()
    else:
        k_vec = dsolve.spsolve(a.tocsr(),b)
        k_mat = np.zeros((d,d),complex)
        k_mat.T[np.where(np.diag(np.ones(d))-1)] = k_vec
    toc('matrix inversion')
    return k_mat
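For eps > 0, fit_model solves the Tikhonov-regularized normal equations (A^T A + eps*nsamples*I) k = A^T b rather than A k = b directly. A dense NumPy restatement of that branch for reference (a sketch that ignores the sparse storage; note the snippet uses the plain transpose, not the conjugate transpose, even though the data are complex):

import numpy as np


def ridge_solve(A, b, eps, nsamples):
    """Solve (A.T A + eps*nsamples*I) k = A.T b, mirroring the eps > 0 branch above."""
    n = A.shape[1]
    lhs = A.T @ A + eps * nsamples * np.eye(n)
    rhs = A.T @ b
    return np.linalg.solve(lhs, rhs)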
Example #5
def crank(V, L1, R1x, L2, R2x, dt, n, crumbs=None, callback=None):
    if crumbs is None:  # avoid sharing a mutable default list across calls
        crumbs = []
    V = V.copy()
    dt *= 0.5

    L1e = flatten_tensor(L1)
    L1i = L1e.copy()
    R1 = np.array(R1x).T

    L2e = flatten_tensor(L2)
    L2i = L2e.copy()
    R2 = np.array(R2x)

    m = 2

    # L  = (As + Ass - r*np.eye(nspots))*-dt + np.eye(nspots)
    L1e.data *= dt
    L1e.data[m, :] += 1
    L1i.data *= -dt
    L1i.data[m, :] += 1
    R1 *= dt

    L2e.data *= dt
    L2e.data[m, :] += 1
    L2i.data *= -dt
    L2i.data[m, :] += 1
    R2 *= dt

    offsets1 = (abs(min(L1i.offsets)), abs(max(L1i.offsets)))
    offsets2 = (abs(min(L2i.offsets)), abs(max(L2i.offsets)))

    print_step = max(1, int(n / 10))
    to_percent = 100.0 / n
    utils.tic("Crank:")
    R = R1 + R2
    normal_shape = V.shape
    transposed_shape = normal_shape[::-1]
    for k in xrange(n):
        if not k % print_step:
            if isnan(V).any():
                print "Crank fail @ t = %f (%i steps)" % (dt * k, k)
                return crumbs
            print int(k * to_percent),
        if callback is not None:
            callback(V, ((n - k) * dt))
        V = (L2e.dot(V.flat).reshape(normal_shape) + R).T
        V = spl.solve_banded(offsets1, L1i.data, V.flat, overwrite_b=True)
        V = (L1e.dot(V).reshape(transposed_shape).T) + R
        V = spl.solve_banded(offsets2, L2i.data, V.flat,
                             overwrite_b=True).reshape(normal_shape)
        crumbs.append(V.copy())
    utils.toc()
    return crumbs
Example #6
def crank(V, L1, R1x, L2, R2x, dt, n, crumbs=None, callback=None):
    if crumbs is None:  # avoid sharing a mutable default list across calls
        crumbs = []
    V = V.copy()
    dt *= 0.5

    L1e = flatten_tensor(L1)
    L1i = L1e.copy()
    R1 = np.array(R1x).T

    L2e = flatten_tensor(L2)
    L2i = L2e.copy()
    R2 = np.array(R2x)

    m = 2

    # L  = (As + Ass - r*np.eye(nspots))*-dt + np.eye(nspots)
    L1e.data *= dt
    L1e.data[m, :] += 1
    L1i.data *= -dt
    L1i.data[m, :] += 1
    R1 *= dt

    L2e.data *= dt
    L2e.data[m, :] += 1
    L2i.data *= -dt
    L2i.data[m, :] += 1
    R2 *= dt

    offsets1 = (abs(min(L1i.offsets)), abs(max(L1i.offsets)))
    offsets2 = (abs(min(L2i.offsets)), abs(max(L2i.offsets)))

    print_step = max(1, int(n / 10))
    to_percent = 100.0 / n
    utils.tic("Crank:")
    R = R1 + R2
    normal_shape = V.shape
    transposed_shape = normal_shape[::-1]
    for k in xrange(n):
        if not k % print_step:
            if isnan(V).any():
                print "Crank fail @ t = %f (%i steps)" % (dt * k, k)
                return crumbs
            print int(k * to_percent),
        if callback is not None:
            callback(V, ((n - k) * dt))
        V = (L2e.dot(V.flat).reshape(normal_shape) + R).T
        V = spl.solve_banded(offsets1, L1i.data, V.flat, overwrite_b=True)
        V = (L1e.dot(V).reshape(transposed_shape).T) + R
        V = spl.solve_banded(offsets2, L2i.data, V.flat, overwrite_b=True).reshape(normal_shape)
        crumbs.append(V.copy())
    utils.toc()
    return crumbs
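Both crank variants lean on scipy.linalg.solve_banded, which takes the number of sub- and super-diagonals plus the matrix in banded storage (one row per diagonal). A self-contained usage sketch, unrelated to the operators above:

import numpy as np
import scipy.linalg as spl

# Tridiagonal A with main diagonal 2 and off-diagonals 1, in banded storage:
ab = np.array([[0., 1., 1., 1.],   # superdiagonal (first entry unused)
               [2., 2., 2., 2.],   # main diagonal
               [1., 1., 1., 0.]])  # subdiagonal (last entry unused)
b = np.array([3., 4., 4., 3.])     # chosen so that A @ ones == b
x = spl.solve_banded((1, 1), ab, b)  # (n_subdiags, n_superdiags) == (1, 1)
assert np.allclose(x, 1.0)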
Example #7
def impl(V, L1, R1x, L2, R2x, dt, n, crumbs=None, callback=None):
    if crumbs is None:  # avoid sharing a mutable default list across calls
        crumbs = []
    V = V.copy()

    # L1i = flatten_tensor(L1)
    L1i = L1.copy()
    R1 = np.array(R1x).T

    # L2i = flatten_tensor(L2)
    L2i = L2.copy()
    R2 = np.array(R2x)

    m = 2

    # L  = (As + Ass - H.interest_rate*np.eye(nspots))*-dt + np.eye(nspots)
    L1i.data *= -dt
    L1i.data[m, :] += 1
    R1 *= dt

    L2i.data *= -dt
    L2i.data[m, :] += 1
    R2 *= dt

    offsets1 = (abs(min(L1i.offsets)), abs(max(L1i.offsets)))
    offsets2 = (abs(min(L2i.offsets)), abs(max(L2i.offsets)))

    dx = np.gradient(spots)[:,np.newaxis]
    dy = np.gradient(vars)
    X, Y = [dim.T for dim in np.meshgrid(spots, vars)]
    gradgrid = dt * coeffs[(0,1)](0, X, Y) / (dx * dy)
    gradgrid[:,0] = 0; gradgrid[:,-1] = 0
    gradgrid[0,:] = 0; gradgrid[-1,:] = 0

    print_step = max(1, int(n / 10))
    to_percent = 100.0 / n
    utils.tic("Impl:")
    for k in xrange(n):
        if not k % print_step:
            if np.isnan(V).any():
                print "Impl fail @ t = %f (%i steps)" % (dt * k, k)
                return crumbs
            print int(k * to_percent),
        if callback is not None:
            callback(V, ((n - k) * dt))
        Vsv = np.gradient(np.gradient(V)[0])[1] * gradgrid
        V = spl.solve_banded(offsets2, L2i.data,
                             (V + Vsv + R2).flat, overwrite_b=True).reshape(V.shape)
        V = spl.solve_banded(offsets1, L1i.data,
                             (V + R1).T.flat, overwrite_b=True).reshape(V.shape[::-1]).T
    crumbs.append(V.copy())
    utils.toc()
    return crumbs
Example #8
File: NN.py Project: Orenmc/NN
    def train(self):
        data = self.train_data
        labels = self.train_labels
        for epoch in range(self.epochs):
            utils.tic()
            total_loss = 0.0  # every epoch loss should start with zero
            good = 0.0
            total_size = 0.0
            data, labels = utils.shuffle(data, labels)
            for d, l in zip(data, labels):
                total_size += 1
                pred, cache = self.fprop(d)
                # check the prediction
                y_hat = np.argmax(pred)
                if y_hat == l:
                    good += 1

                err_cost = float(pred[int(l)])  # loss = -1 * log(err_cost)

                cross_entropy = utils.cross_entropy_loss(err_cost)
                if self.L2:
                    cross_entropy += utils.L2_cost(self.parameters["W"],
                                                   self.L2)
                total_loss += cross_entropy

                grads = self.bprop(cache, d, l)
                self.weights_updates(grads)

            print('epoch {}:'.format(epoch + 1))
            acc = good * 100 / total_size
            train_acc.append(acc)
            avg_loss = total_loss / total_size
            train_loss.append(avg_loss)

            print('train accuracy: {:2.2f}%'.format(acc))
            print('train AVG loss: {:2.2f}'.format(avg_loss))

            self.validation_acc()
            print('time:')
            utils.toc()
            # end of epoch
        # cache all about model
        trained_model = {
            "norm": self.norm,
            "parameters": self.parameters,
            "lr": self.lr
        }
        directory = str(len(self.hidden)) + 'Hidden/L2/'
        np.save(directory + 'model_' + self.model_name, trained_model)
        self.printGraph(directory)
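utils.cross_entropy_loss and utils.L2_cost are not shown; minimal sketches consistent with how they are called above (err_cost is the probability assigned to the correct class, and self.parameters["W"] is assumed to be a list of weight matrices):

import numpy as np


def cross_entropy_loss(p_correct):
    """-log of the probability assigned to the correct class, clamped to avoid log(0)."""
    return -np.log(max(p_correct, 1e-12))


def L2_cost(weights, lam):
    """lam/2 times the summed squared entries of every weight matrix."""
    return 0.5 * lam * sum(np.sum(W ** 2) for W in weights)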
Example #9
def processImg(imgName, filename, idx, batchSize, layers, output):
    startTime = tic()
    # Initialize image and boxes
    dims = net.caffenet.InitializeImage(filename, IMG_DIM, ImageNetMean,
                                        CROP_SIZE)
    boxes = multiScaleBoxes(dims, CROP_SIZE)
    # Write index file
    [idx.write(imgName + ' ' + ' '.join(map(str, b)) + '\n') for b in boxes]
    #Prepare boxes, make sure that extra rows are added to fill the last batch
    allFeat = {}
    n = len(boxes)
    for l in layers.keys():
        allFeat[l] = emptyMatrix([n, layers[l]['dim']])
    numBatches = (n + batchSize - 1) / batchSize
    boxes += [[0, 0, 0, 0] for x in range(numBatches * batchSize - n)]

    for k in range(numBatches):
        s, f = k * batchSize, (k + 1) * batchSize
        e = batchSize if f <= n else n - s
        # Forward this batch
        net.caffenet.ForwardRegions(boxes[s:f], CONTEXT_PAD)  #,filename)
        outputs = net.caffenet.blobs
        f = n if f > n else f
        # Collect outputs
        for l in layers.keys():
            allFeat[l][s:f, :] = outputs[layers[l]['idx']].data[
                0:e, :, :, :].reshape([e, layers[l]['dim']])
    # Release image data
    net.caffenet.ReleaseImageData()
    # Save files for this image
    for l in layers.keys():
        saveMatrix(allFeat[l][0:n, :], output + '.' + l)
    lap = toc('GPU is done with ' + str(n) + ' boxes in:', startTime)
Example #10
def fit_gen_model(phi):
    assert phi.ndim == 2, 'data has to be two-dimensional'
    assert phi.shape[1] > phi.shape[0], 'data samples have to be in columns'
    d, nsamples = phi.shape
    nij = 4 * d**2  # number of coupling terms

    adata, arow, acol, b = fill_gen_model_matrix(phi)
    a = sparse.coo_matrix((adata, (arow, acol)), (nij, nij))

    tic('matrix inversion')
    m_vec, flag = isolve.cg(a.tocsr(), b)
    # print 'exit flag = ', flag
    assert flag == 0
    m = m_vec2mat(m_vec)
    toc('matrix inversion')
    return m
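SciPy's conjugate-gradient solver returns a (solution, exit_flag) pair, with flag 0 meaning convergence, which is what the assert above checks. A self-contained usage sketch on a small symmetric positive-definite system:

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import cg

A = sparse.diags([1.0, 2.0, 3.0]).tocsr()  # SPD test matrix
b = np.array([1.0, 4.0, 9.0])
x, flag = cg(A, b)
assert flag == 0
assert np.allclose(x, [1.0, 2.0, 3.0])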
Example #11
def find_similar_subroutes_per_test_trip(test_points,
                                         train_df,
                                         k,
                                         paropts=None,
                                         verbosity=False):
    if paropts:
        print("Parallelizing with", paropts)
        partype, numpar = paropts
    else:
        partype, numpar = None, None

    timestart = utils.tic()
    test_lonlat = utils.idx_to_lonlat(test_points, format="tuples")
    max_subseqs = []
    if partype:
        # num threads or processes
        if partype == "processes":
            max_subseqs = exec_with_processes(train_df, numpar, test_lonlat, k)
        elif partype == "threads":
            max_subseqs = exec_with_threads(train_df, numpar, test_lonlat, k)
    else:
        max_subseqs = serial_execution(train_df,
                                       test_lonlat,
                                       k,
                                       verbosity=verbosity)
    if len(max_subseqs) != k:
        print("WARNING: Expected %d subsequences, got %d!" % (k, len(max_subseqs)))
    print("Extracted %d nearest subsequences of a %d-long test trip in: %s" %
          (len(max_subseqs), len(test_points), utils.tictoc(timestart)))
    return max_subseqs
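This project's timing utils use a return-value style instead of the global stopwatch seen earlier: tic() returns a timestamp and tictoc(start) formats the elapsed time. A minimal sketch of that assumed variant:

import time


def tic():
    return time.time()


def tictoc(start):
    return '%.3f sec' % (time.time() - start)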
Example #12
def serial_execution(df, test_lonlat, k, verbosity=False):
    max_subseqs = []
    # for each trip in the training data
    for index, row in df.iterrows():
        train_points = row["points"]
        train_points = eval(train_points)
        train_lonlat = utils.idx_to_lonlat(train_points, format="tuples")
        timestart = utils.tic()
        # compute common subsequences between the test trip and the current candidate
        _, subseqs_idx_list = calc_lcss(test_lonlat, train_lonlat)
        # consider non-consecutive subroutes
        subseqs_idx = list(
            set([idx for seq in subseqs_idx_list for idx in seq]))
        elapsed = utils.tictoc(timestart)
        # sort by decr. length
        subseqs_idx.sort(reverse=True)
        # update the list of the longest subsequences
        if subseqs_idx:
            max_subseqs = update_current_maxsubseq(max_subseqs, subseqs_idx, k,
                                                   elapsed, row)
            # print("Max subseq length:",len(max_subseqs))
            #print([x[0] for x in max_subseqs])
            # print("Updated max subseqs, lens now:",[len(x[0]) for x in max_subseqs])
    if verbosity:
        print("Got %d subseqs:" % len(max_subseqs),
              [(x, y, z["tripId"]) for (x, y, z) in max_subseqs])

    #max_subseqs = check_reverse_lcss(max_subseqs, test_lonlat, k)
    if verbosity:
        print("Got %d reversed: subseqs:" % len(max_subseqs),
              [(x, y, z["tripId"]) for (x, y, z) in max_subseqs])

    return max_subseqs
Example #13
def exec_with_threads(df, numpar, test_lonlat, k):
    max_subseqs = []
    res1 = [[] for _ in range(numpar)]
    res2 = [[] for _ in range(numpar)]
    subframes = utils.get_sub_dataframes(df, numpar)
    # assign data and start the threads
    threads = []
    timestart = utils.tic()
    for i in range(numpar):
        train_lonlat = []
        for index, row in subframes[i].iterrows():
            train_points = eval(row["points"])
            # accumulate (rather than overwrite) the points of every trip in this subframe
            train_lonlat += utils.idx_to_lonlat(train_points, format="tuples")
        threads.append(
            threading.Thread(target=calc_lcss,
                             args=(test_lonlat, train_lonlat, res1, res2)))
        threads[i].start()
    # gather and merge results
    subseqs = []
    subseqs_idx = []
    for i in range(numpar):
        threads[i].join()
        subseqs += res1[i]
        subseqs_idx += res2[i]
    subseqs_idx = sorted(subseqs_idx, key=lambda x: len(x), reverse=True)
    elapsed = utils.tictoc(timestart)
    max_subseqs = update_current_maxsubseq(max_subseqs, subseqs_idx, k,
                                           elapsed, row)
    return max_subseqs
Example #14
def processImg(info, filename, idx, batchSize, layers, output):
  startTime = tic()
  allFeat = {}
  n = len(info)
  for l in layers.keys():
    allFeat[l] = emptyMatrix([n,layers[l]['dim']])
  numBatches = (n + batchSize - 1) / batchSize
  # Write the index file
  [idx.write(b[4]) for b in info]
  # Prepare boxes, make sure that extra rows are added to fill the last batch
  boxes = [x[:-1] for x in info] + [ [0,0,0,0] for x in range(numBatches * batchSize - n) ]
  # Initialize the image
  net.caffenet.InitializeImage(filename, ImageNetMean)
  for k in range(numBatches):
    s,f = k*batchSize,(k+1)*batchSize
    e = batchSize if f <= n else n-s
    # Forward this batch
    net.caffenet.ForwardRegions(boxes[s:f])
    #outputBlobs = [ np.empty((batch, 1000, 1, 1), dtype=np.float32) ]
    #net.caffenet.ForwardRegions(boxes[s:f],filename, outputBlobs)
    #print outputBlobs[0][0].shape, np.argmax(outputBlobs[0][0]), np.max(outputBlobs[0][0])
    outputs =  net.caffenet.blobs()
    f = n if f > n else f
    # Collect outputs
    for l in layers.keys():
      allFeat[l][s:f,:] = outputs[layers[l]['idx']].data[0:e,:,:,:].reshape([e,layers[l]['dim']])
  # Release image data
  net.caffenet.ReleaseImageData()
  # Save files for this image
  for l in layers.keys():
    saveMatrix(allFeat[l][0:n,:],output+'.'+l)
  lap = toc('GPU is done with '+str(len(info))+' boxes in:',startTime)
Example #15
 def construct_tree(self):
     while tic() - self.timer < 1.5:  # timer, exit if exceeds 1.5 s
         x_rand = self.sample_free()
         x_nearest, edge_nearest = self.nearest(x_rand)
         x_new = self.steer(x_nearest, x_rand, edge_nearest)
         if x_new is None:
             continue
         X_near = self.near(x_new, min(self.r, self.epsilon))
         self.tree[x_new] = [
             self.tree[x_nearest][0] + dist(x_nearest, x_new), None
         ]
         # extend along a minimum-cost path
         x_min = x_nearest
         for x_near in X_near:
             if collision_free(x_near, x_new, self.blocks):
                 c = self.tree[x_near][0] + dist(x_near, x_new)
                 if c < self.tree[x_new][0]:
                     self.tree[x_new][0] = c
                     x_min = x_near
         self.add_edge(x_min, x_new)
         self.tree[x_new][1] = x_min
         # rewire the tree
         for x_near in X_near:
             if collision_free(x_near, x_new, self.blocks) and \
                     self.tree[x_new][0] + dist(x_new, x_near) < self.tree[x_near][0]:
                 self.delete_edge(x_near, self.tree[x_near][1])
                 self.tree[x_near][1] = x_new
                 self.add_edge(x_new, x_near)
         # check if goal in tree:
         if self.goal in self.tree:
             self.map_completed = True
             return
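dist, sample_free, and the other helpers belong to the surrounding RRT* class. A sketch of the Euclidean dist the cost updates above imply (an assumption, not the project's code):

import math


def dist(a, b):
    """Euclidean distance between two points given as coordinate tuples."""
    return math.sqrt(sum((ai - bi) ** 2 for ai, bi in zip(a, b)))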
Example #16
def timing_benchmark(eval_dim=None,dims=[2, 4, 6, 8, 10],nsamps=10**4):
    ind = 0
    t = np.zeros(len(dims),float)
    for d in dims:
        print 'Benchmarking dim: ', d
        phi = 2*np.pi*np.random.rand(d,nsamps)

        tic('fit parameters')
        c_inv = fit_model(phi)
        t[ind] = toc('fit parameters')
        ind += 1
    pol = np.polyfit(dims[1:],t[1:],3)
    if eval_dim:
        print np.polyval(pol,eval_dim)
    else:
        return pol
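The benchmark fits a cubic polynomial to the measured times and extrapolates with np.polyval. A self-contained illustration of that fit/evaluate pair on synthetic timings:

import numpy as np

dims = np.array([4, 6, 8, 10])
times = 1e-4 * dims ** 3           # synthetic timings with exact cubic growth
pol = np.polyfit(dims, times, 3)   # coefficients, highest power first
print(np.polyval(pol, 12))         # extrapolated time at dim 12, ~0.1728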
Example #17
def train(features,
          targets,
          num_folds,
          classifiers,
          output_folder,
          seed=None,
          filename_tag="",
          classifier_obj=None):
    kf = KFold(n_splits=num_folds, random_state=seed)
    folds_idxs = list(kf.split(features))
    trips_array = np.asarray(features)
    targets = np.asarray(targets)

    if type(classifiers) != list:
        classifiers = [classifiers]
    # train/val accuracies
    accuracies = {}
    mean_accuracies = {}
    # classify
    for classifier in classifiers:
        classif_start = utils.tic()
        accuracies[classifier] = []
        print("\nTesting classifier [%s]" % classifier)
        # train & test each classifier
        # for each fold
        for i, (train_idx, val_idx) in enumerate(folds_idxs):
            print("\tClassifying fold %d/%d" % (i + 1, len(folds_idxs)),
                  end=" ")
            train = (trips_array[train_idx], targets[train_idx])
            val = (trips_array[val_idx], targets[val_idx])
            if classifier == "knn":
                k = 5
                accTrain, accVal = knn_classification(
                    train, val, k, classifier_obj=classifier_obj)
            elif classifier == "logreg":
                accTrain, accVal = logreg_classification(
                    train, val, classifier_obj=classifier_obj)
            elif classifier == "randfor":
                accTrain, accVal = randfor_classification(
                    train, val, seed, classifier_obj=classifier_obj)
            accuracies[classifier].append((accTrain, accVal))
            print("- accuracies train/val:", accuracies[classifier][-1])
        elapsed = utils.tictoc(classif_start)
        print("Done in:", elapsed)

        # accuracy across all folds
        mean_accuracies[classifier] = [np.mean([x[0] for x in accuracies[classifier]]), \
                                       np.mean([x[1] for x in accuracies[classifier]])]
        titlestr = "%s, overall accuracy train/val: %s" % (
            classifier, str(mean_accuracies[classifier]))
        chart_filename = os.path.join(
            output_folder, classifier + "_" + filename_tag + "_chart")
        utils.barchart(list(range(1, num_folds + 1)),
                       accuracies[classifier],
                       title=titlestr,
                       ylabel="accuracy",
                       legend=["train", "val"],
                       save=chart_filename)

    return mean_accuracies
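KFold here only partitions sample indices; note that sklearn's KFold ignores random_state unless shuffle=True is also passed (newer versions raise an error for the combination used above). A minimal illustration of the API:

import numpy as np
from sklearn.model_selection import KFold

X = np.arange(10).reshape(-1, 1)
kf = KFold(n_splits=5, shuffle=True, random_state=0)
for train_idx, val_idx in kf.split(X):
    print(len(train_idx), len(val_idx))  # 8 2 on every fold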
Example #18
def processImg(info, filename, idx, batchSize, layers):
    startTime = tic()
    allFeat = {}
    n = len(info)
    for l in layers.keys():
        allFeat[l] = emptyMatrix([n, layers[l]['dim']])
    numBatches = (n + batchSize - 1) / batchSize
    # Write the index file
    [idx.write(b[4]) for b in info]
    # Prepare boxes, make sure that extra rows are added to fill the last batch
    boxes = [x[:-1]
             for x in info] + [[0, 0, 0, 0]
                               for x in range(numBatches * batchSize - n)]
    # Initialize the image
    net.caffenet.InitializeImage(filename, IMG_DIM, ImageNetMean, CROP_SIZE)
    for k in range(numBatches):
        s, f = k * batchSize, (k + 1) * batchSize
        e = batchSize if f <= n else n - s
        # Forward this batch
        net.caffenet.ForwardRegions(boxes[s:f], CONTEXT_PAD)  #,filename)
        outputs = net.caffenet.blobs
        f = n if f > n else f
        # Collect outputs
        for l in layers.keys():
            allFeat[l][s:f, :] = outputs[layers[l]['idx']].data[
                0:e, :, :, :].reshape([e, layers[l]['dim']])
    # Release image data
    net.caffenet.ReleaseImageData()
    # Return features of boxes for this image
    for l in layers.keys():
        allFeat[l] = allFeat[l][0:n, :]
    lap = toc('GPU is done with ' + str(len(info)) + ' boxes in:', startTime)
    return allFeat
Example #19
def fill_model_matrix(phi):
    z = np.concatenate((np.exp(1j*phi),np.exp(-1j*phi)))
    d, nsamples = phi.shape
    z.shape = (2,d,nsamples)
    nij = d**2-d # number of coupling terms
    na = 4*d**3-10*d**2+6*d # upper bound for number of elements in sparse matrix
    adata = np.zeros(na,complex)
    arow = np.zeros(na,int)
    acol = np.zeros(na,int)
    b = np.zeros(nij,complex)

    tic('weave')
    weave.inline(phasemodel_code_blitz, ['z','adata','arow','acol','b'],
                 type_converters=weave.converters.blitz)
    toc('weave')
    return adata, arow, acol, b
Example #20
def fit_gen_model(phi):
    assert phi.ndim == 2, 'data has to be two-dimensional'
    assert phi.shape[1] > phi.shape[0], 'data samples have to be in columns'
    d, nsamples = phi.shape
    nij = 4*d**2 # number of coupling terms

    adata, arow, acol, b = fill_gen_model_matrix(phi)
    a = sparse.coo_matrix((adata,(arow,acol)), (nij,nij))

    tic('matrix inversion')
    m_vec,flag = isolve.cg(a.tocsr(),b)
    # print 'exit flag = ', flag
    assert flag==0
    m = m_vec2mat(m_vec)
    toc('matrix inversion')
    return m
Example #21
def timing_benchmark(eval_dim=None, dims=[2, 4, 6, 8, 10], nsamps=10**4):
    ind = 0
    t = np.zeros(len(dims), float)
    for d in dims:
        print 'Benchmarking dim: ', d
        phi = 2 * np.pi * np.random.rand(d, nsamps)

        tic('fit parameters')
        c_inv = fit_model(phi)
        t[ind] = toc('fit parameters')
        ind += 1
    pol = np.polyfit(dims[1:], t[1:], 3)
    if eval_dim:
        print np.polyval(pol, eval_dim)
    else:
        return pol
Example #22
def question_c(features_file, grid_file, test_file, output_folder, seed,
               classif_file, num_folds):
    total_start = utils.tic()
    df_features = pd.read_csv(features_file)
    features, jid_mapping, targets = jcp.preprocess_train_data(
        df_features, seed)
    classifiers = ["knn", "logreg", "randfor"]
    # classifiers = ["randfor"]
    mean_accuracies = jcp.train(features,
                                targets,
                                num_folds,
                                classifiers,
                                output_folder,
                                seed=seed)

    # print mean accuracy per classifier
    print()
    for classifier in mean_accuracies:
        print(classifier, "accuracy train/val:", mean_accuracies[classifier])

    # select the random forest algorithm to beat the benchmark
    impr_classifier_name = "randfor"
    baseline_accuracy = mean_accuracies[impr_classifier_name][-1]

    print()
    print("Improving classification for classifier", impr_classifier_name)
    best_classifier, best_technique, best_accuracy = jcp.improve_randfor(
        baseline_accuracy, features_file, num_folds, output_folder,
        impr_classifier_name, seed)
    jcp.test(best_classifier, best_technique, test_file, grid_file,
             jid_mapping, classif_file)
    elapsed = utils.tictoc(total_start)
    print("Done in:", elapsed)
Example #23
def processImg(info, filename, batchSize, layers):
  startTime = tic()
  allFeat = {}
  n = len(info)
  for l in layers.keys():
    allFeat[l] = emptyMatrix([n,layers[l]['dim']])
  numBatches = (n + batchSize - 1) / batchSize
  # Prepare boxes, make sure that extra rows are added to fill the last batch
  boxes = [x[:-1] for x in info] + [ [0,0,0,0] for x in range(numBatches * batchSize - n) ]
  # Initialize the image
  net.caffenet.InitializeImage(filename, IMG_DIM, ImageNetMean, CROP_SIZE)
  for k in range(numBatches):
    s,f = k*batchSize,(k+1)*batchSize
    e = batchSize if f <= n else n-s
    # Forward this batch
    net.caffenet.ForwardRegions(boxes[s:f],CONTEXT_PAD)
    outputs = net.caffenet.blobs
    f = n if f > n else f
    # Collect outputs
    for l in layers.keys():
      allFeat[l][s:f,:] = outputs[layers[l]['idx']].data[0:e,:,:,:].reshape([e,layers[l]['dim']])
  # Release image data
  net.caffenet.ReleaseImageData()
  # Return features of boxes for this image
  for l in layers.keys():
    allFeat[l] = allFeat[l][0:n,:]
  lap = toc('GPU is done with '+str(len(info))+' boxes in:',startTime)
  return allFeat
Example #24
def processImg(imgName, filename, idx, batchSize, layers, output):
  startTime = tic()
  # Initialize image and boxes
  dims = net.caffenet.InitializeImage(filename, IMG_DIM, ImageNetMean, CROP_SIZE)
  boxes = multiScaleBoxes(dims, CROP_SIZE)
  # Write index file
  [idx.write(imgName + ' ' + ' '.join(map(str,b)) + '\n') for b in boxes]
  #Prepare boxes, make sure that extra rows are added to fill the last batch
  allFeat = {}
  n = len(boxes)
  for l in layers.keys():
    allFeat[l] = emptyMatrix([n,layers[l]['dim']])
  numBatches = (n + batchSize - 1) / batchSize
  boxes += [ [0,0,0,0] for x in range(numBatches * batchSize - n) ]

  for k in range(numBatches):
    s,f = k*batchSize,(k+1)*batchSize
    e = batchSize if f <= n else n-s
    # Forward this batch
    net.caffenet.ForwardRegions(boxes[s:f],CONTEXT_PAD) #,filename)
    outputs =  net.caffenet.blobs
    f = n if f > n else f
    # Collect outputs
    for l in layers.keys():
      allFeat[l][s:f,:] = outputs[layers[l]['idx']].data[0:e,:,:,:].reshape([e,layers[l]['dim']])
  # Release image data
  net.caffenet.ReleaseImageData()
  # Save files for this image
  for l in layers.keys():
    saveMatrix(allFeat[l][0:n,:],output+'.'+l)
  lap = toc('GPU is done with '+str(n)+' boxes in:',startTime)
Example #25
 def train(self):
     networkFile = config.get('networkDir') + config.get(
         'snapshotPrefix') + '_iter_' + config.get(
             'trainingIterationsPerBatch') + '.caffemodel'
     interactions = config.geti('trainInteractions')
     minEpsilon = config.getf('minTrainingEpsilon')
     epochSize = len(self.environment.imageList) / 1
     epsilon = 1.0
     self.controller.setEpsilonGreedy(epsilon,
                                      self.environment.sampleAction)
     epoch = 1
     exEpochs = config.geti('explorationEpochs')
     while epoch <= exEpochs:
         s = cu.tic()
         print 'Epoch', epoch, ': Exploration (epsilon=1.0)'
         self.runEpoch(interactions, len(self.environment.imageList))
         self.task.flushStats()
         self.doValidation(epoch)
         s = cu.toc('Epoch done in ', s)
         epoch += 1
     self.learner = QLearning()
     self.agent.learner = self.learner
     egEpochs = config.geti('epsilonGreedyEpochs')
     while epoch <= egEpochs + exEpochs:
         s = cu.tic()
         epsilon = epsilon - (1.0 - minEpsilon) / float(egEpochs)
         if epsilon < minEpsilon: epsilon = minEpsilon
         self.controller.setEpsilonGreedy(epsilon,
                                          self.environment.sampleAction)
         print 'Epoch', epoch, '(epsilon-greedy:{:5.3f})'.format(epsilon)
         self.runEpoch(interactions, epochSize)
         self.task.flushStats()
         self.doValidation(epoch)
         s = cu.toc('Epoch done in ', s)
         epoch += 1
     maxEpochs = config.geti('exploitLearningEpochs') + exEpochs + egEpochs
     while epoch <= maxEpochs:
         s = cu.tic()
         print 'Epoch', epoch, '(exploitation mode: epsilon={:5.3f})'.format(
             epsilon)
         self.runEpoch(interactions, epochSize)
         self.task.flushStats()
         self.doValidation(epoch)
         s = cu.toc('Epoch done in ', s)
         shutil.copy(networkFile, networkFile + '.' + str(epoch))
         epoch += 1
Example #26
 def load_data(self, file_path, data_loading_fn, *args, **kwargs):
     local_file_path = os.path.expanduser(file_path)
     self.file_path = local_file_path
     print('loading file: %s' % file_path)
     if not os.path.isfile(local_file_path) and self.main_hostname != 'localhost':
         print('Copying data from remote machine...')
         tic()
         ssh.copy_file_from_remote(file_path, self.main_hostname, self.username)
         print('Completed copying data from main host.')
         toc()
     print('Loading data...')
     tic()
     data = data_loading_fn(local_file_path, *args, **kwargs)
     toc()
     return data
Example #27
def fill_gen_model_matrix(phi):
    d, nsamples = phi.shape
    x = p2torus(phi)
    q = p2dtorus(phi)
    x.shape = (d,2,nsamples)
    q.shape = (d,2,nsamples)

    nij = 4*d**2 # number of coupling terms
    na = 32*d**3 - 16*d**2 # number of elements in large matrix multiplying mij
    adata = np.zeros(na,float)
    arow = np.zeros(na,int)
    acol = np.zeros(na,int)
    b = np.zeros(nij,float)

    tic('weave')
    weave.inline(gen_phasemodel_code, ['x','q','adata','arow','acol','b'])
    toc('weave')
    return adata, arow, acol, b
Example #28
    def segment(self):
        if self.structural_image is None:
            raise ValueError('No structural image provided!')

        t = tic()
        print('Starting segmentation on a provided template...')
        self._extract_rois_caiman(self.structural_image)
        ptoc(t, start_string='done in')
        print('found ' + str(self.Ain.shape[1]) + ' cells')
Example #29
    def continue_planning(self):
        self.timer = tic()
        node_j, opened, closed, closed_pre = self.continue_plan_data
        while True:
            if tic() - self.timer > 1.5:
                self.continue_plan_data = [
                    copy.deepcopy(node_j),
                    copy.deepcopy(opened),
                    copy.deepcopy(closed),
                    copy.deepcopy(closed_pre)
                ]
                return self.start

            # remove node_i with smallest f_i and put into closed
            node_i = None
            f = np.inf
            for i_rp in opened:
                f_i = opened[i_rp].g + opened[i_rp].h
                if f_i < f:
                    f = f_i
                    node_i = opened[i_rp]
            node_i_rp = tuple(node_i.pos)
            opened.pop(node_i_rp)
            closed[node_i_rp] = node_i

            if tuple(node_j.pos) in closed:
                break

            for newrp in self.connectivity_table[node_i_rp]:
                if newrp not in closed_pre or newrp in closed:
                    continue
                if newrp not in opened:
                    opened[newrp] = closed_pre[newrp]
                    opened[newrp].h = self.h(newrp)
                else:
                    node_newrp_j = node_i.g + dist(newrp, node_i_rp)
                    if opened[newrp].g > node_newrp_j:
                        opened[newrp].g = node_newrp_j
                        opened[newrp].parent = node_i_rp
        cur_node = closed[tuple(node_j.pos)]
        while cur_node.parent != tuple(self.start):
            cur_node = closed_pre[cur_node.parent]
        self.to_be_continued = False
        return cur_node.pos
Example #30
def impl(V, L1, R1x, L2, R2x, dt, n, crumbs=None, callback=None):
    if crumbs is None:  # avoid sharing a mutable default list across calls
        crumbs = []
    V = V.copy()

    L1i = flatten_tensor(L1)
    R1 = np.array(R1x).T

    L2i = flatten_tensor(L2)
    R2 = np.array(R2x)

    m = 2

    # L  = (As + Ass - r*np.eye(nspots))*-dt + np.eye(nspots)
    L1i.data *= -dt
    L1i.data[m, :] += 1
    R1 *= dt

    L2i.data *= -dt
    L2i.data[m, :] += 1
    R2 *= dt

    offsets1 = (abs(min(L1i.offsets)), abs(max(L1i.offsets)))
    offsets2 = (abs(min(L2i.offsets)), abs(max(L2i.offsets)))

    print_step = max(1, int(n / 10))
    to_percent = 100.0 / n
    utils.tic("Impl:")
    for k in xrange(n):
        if not k % print_step:
            if isnan(V).any():
                print "Impl fail @ t = %f (%i steps)" % (dt * k, k)
                return crumbs
            print int(k * to_percent),
        if callback is not None:
            callback(V, ((n - k) * dt))
        V = spl.solve_banded(offsets2,
                             L2i.data, (V + R2).flat,
                             overwrite_b=True).reshape(V.shape)
        V = spl.solve_banded(offsets1,
                             L1i.data, (V + R1).T.flat,
                             overwrite_b=True).reshape(V.shape[::-1]).T
    crumbs.append(V.copy())
    utils.toc()
    return crumbs
Example #31
 def runEpoch(self, interactions, maxImgs):
     img = 0
     s = cu.tic()
     while img < maxImgs:
         self.experiment.doInteractions(interactions)
         self.agent.learn()
         self.agent.reset()
         self.environment.loadNextEpisode()
         img += 1
     s = cu.toc('Run epoch with ' + str(maxImgs) + ' episodes', s)
Example #32
 def segment_mm3d(self):
     """
     Performs makeMasks3D segmentation.
     """
     t = tic()
     print('Starting makeMasks3D segmentation...', end=' ')
     image = self.prep_mm3d_template(
         glob('E:/caiman_scratch/template/*.mat')[0])
     self._extract_rois_caiman(image)
     ptoc(t, start_string='done in')
Example #33
 def runEpoch(self, interactions, maxImgs):
   img = 0
   s = cu.tic()
   while img < maxImgs:
     self.experiment.doInteractions(interactions)
     self.agent.learn()
     self.agent.reset()
     self.environment.loadNextEpisode()
     img += 1
   s = cu.toc('Run epoch with ' + str(maxImgs) + ' episodes', s)
Example #34
 def run(self, image, features, boxes):
     s = cu.tic()
     result = {}
     boxSet = [map(float, b[1:]) for b in boxes]
     for i in self.catIndex:
         scores = features[:, i]
         fb, fs = det.nonMaximumSuppression(boxSet, scores, self.maxOverlap)
         result[i] = (image, fb, fs)
     s = cu.toc(image, s)
     return result
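det.nonMaximumSuppression is not shown on this page; a minimal greedy IoU-based sketch consistent with how it is called (boxes as [x1, y1, x2, y2], returning kept boxes and their scores; the original's exact semantics may differ):

import numpy as np


def nonMaximumSuppression(boxes, scores, maxOverlap):
    """Greedy NMS: keep boxes by descending score, dropping any with IoU > maxOverlap."""
    boxes = np.asarray(boxes, dtype=float)
    scores = np.asarray(scores, dtype=float)
    order = np.argsort(scores)[::-1]
    keep = []
    while order.size:
        i = order[0]
        keep.append(i)
        rest = order[1:]
        x1 = np.maximum(boxes[i, 0], boxes[rest, 0])
        y1 = np.maximum(boxes[i, 1], boxes[rest, 1])
        x2 = np.minimum(boxes[i, 2], boxes[rest, 2])
        y2 = np.minimum(boxes[i, 3], boxes[rest, 3])
        inter = np.maximum(0.0, x2 - x1) * np.maximum(0.0, y2 - y1)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        areas = (boxes[rest, 2] - boxes[rest, 0]) * (boxes[rest, 3] - boxes[rest, 1])
        iou = inter / (area_i + areas - inter + 1e-12)
        order = rest[iou <= maxOverlap]
    return boxes[keep], scores[keep]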
Example #35
 def run(self, image, features, boxes):
   s = cu.tic()
   result = {}
   boxSet = [ map(float, b[1:]) for b in boxes ]
   for i in self.catIndex:
     scores = features[:,i]
     fb,fs = det.nonMaximumSuppression(boxSet, scores, self.maxOverlap)
     result[i] = (image, fb, fs)
   s = cu.toc(image, s)
   return result
Example #36
def mainLoop(modelType, modelArgs, positives, trainingList, featuresDir,
             featuresExt, modelOut, maxNegOverlap, iter):
    pos, posIdx, featSize, fmSize = positives
    featureSpace = pos.shape[1]
    startTime = cu.tic()
    if iter == 0:
        ## Random Negatives
        print ' >>> RANDOM NEGATIVES'
        N, negIdx = learn.getRandomNegs(featuresDir, trainingList, featuresExt,
                                        featSize, maxVectorsCache,
                                        maxNegativeImages)
        cellsPerImage = featSize / featureSpace
        N = N.reshape((N.shape[0], featureSpace, fmSize,
                       fmSize))  # Recover original feature layout
        neg = np.zeros((cellsPerImage * N.shape[0], featureSpace))
        for i in range(N.shape[0]):
            neg[i * cellsPerImage:(i + 1) * cellsPerImage] = N[i].T.reshape(
                (cellsPerImage, featureSpace))  # Unfold features
        hards = {'features': np.zeros((0, neg.shape[1])), 'index': []}
        lap = cu.toc(
            'Random negatives matrix (' + str(neg.shape[0]) + ' instances)',
            startTime)
    else:
        ## Mine hard negatives
        print ' >>> MINING HARD NEGATIVES'
        model = det.createDetector(modelType, modelArgs)
        model.load(modelOut + '.' + str(iter - 1))
        detList, detMatrix = maskDetector.detectObjects(
            model, trainingList, featuresDir, featuresExt, -10.0)
        hdnList, detMatrix = maskDetector.selectHardNegatives(
            detList, detMatrix, posIdx, maxNegativeVectors)
        neg = maskDetector.loadHardNegativesFromMatrix(featuresDir, hdnList,
                                                       detMatrix, featuresExt,
                                                       featureSpace,
                                                       maxNegativeVectors)
        hards = cu.loadMatrixNoCompression(modelOut + '.hards').item()
        lap = cu.toc(
            'Hard negatives (' + str(neg.shape[0]) + ' mined + ' +
            str(hards['features'].shape[0]) + ' previous instances)',
            startTime)

    ## Learn Detector
    neg = np.concatenate((neg, hards['features']))
    clf = det.createDetector(modelType, modelArgs)
    clf.learn(pos, neg)
    clf.save(modelOut + '.' + str(iter))
    lap = cu.toc('Classifier learned:', lap)

    ## Keep hard negatives for next iterations
    scores = clf.predict(neg)
    hardNegsIdx = np.argsort(scores)
    hardNeg = np.concatenate(
        (hards['features'], neg[hardNegsIdx[-cu.topHards:]]))
    cu.saveMatrixNoCompression({'features': hardNeg}, modelOut + '.hards')
    print ' ** Iteration', iter, 'done'
Example #37
def impl(V, L1, R1x, L2, R2x, dt, n, crumbs=None, callback=None):
    if crumbs is None:  # avoid sharing a mutable default list across calls
        crumbs = []
    V = V.copy()

    L1i = flatten_tensor(L1)
    R1 = np.array(R1x).T

    L2i = flatten_tensor(L2)
    R2 = np.array(R2x)

    m = 2

    # L  = (As + Ass - r*np.eye(nspots))*-dt + np.eye(nspots)
    L1i.data *= -dt
    L1i.data[m, :] += 1
    R1 *= dt

    L2i.data *= -dt
    L2i.data[m, :] += 1
    R2 *= dt

    offsets1 = (abs(min(L1i.offsets)), abs(max(L1i.offsets)))
    offsets2 = (abs(min(L2i.offsets)), abs(max(L2i.offsets)))

    print_step = max(1, int(n / 10))
    to_percent = 100.0 / n
    utils.tic("Impl:")
    for k in xrange(n):
        if not k % print_step:
            if isnan(V).any():
                print "Impl fail @ t = %f (%i steps)" % (dt * k, k)
                return crumbs
            print int(k * to_percent),
        if callback is not None:
            callback(V, ((n - k) * dt))
        V = spl.solve_banded(offsets2, L2i.data,
                             (V + R2).flat, overwrite_b=True).reshape(V.shape)
        V = spl.solve_banded(offsets1, L1i.data,
                             (V + R1).T.flat, overwrite_b=True).reshape(V.shape[::-1]).T
    crumbs.append(V.copy())
    utils.toc()
    return crumbs
Example #38
 def train(self):
   networkFile = config.get('networkDir') + config.get('snapshotPrefix') + '_iter_' + config.get('trainingIterationsPerBatch') + '.caffemodel'
   interactions = config.geti('trainInteractions')
   minEpsilon = config.getf('minTrainingEpsilon')
   epochSize = len(self.environment.imageList)/1
   epsilon = 1.0
   self.controller.setEpsilonGreedy(epsilon, self.environment.sampleAction)
   epoch = 1
   exEpochs = config.geti('explorationEpochs')
   while epoch <= exEpochs:
     s = cu.tic()
     print 'Epoch',epoch,': Exploration (epsilon=1.0)'
     self.runEpoch(interactions, len(self.environment.imageList))
     self.task.flushStats()
     self.doValidation(epoch)
     s = cu.toc('Epoch done in ',s)
     epoch += 1
   self.learner = QLearning()
   self.agent.learner = self.learner
   egEpochs = config.geti('epsilonGreedyEpochs')
   while epoch <= egEpochs + exEpochs:
     s = cu.tic()
     epsilon = epsilon - (1.0-minEpsilon)/float(egEpochs)
     if epsilon < minEpsilon: epsilon = minEpsilon
     self.controller.setEpsilonGreedy(epsilon, self.environment.sampleAction)
     print 'Epoch',epoch ,'(epsilon-greedy:{:5.3f})'.format(epsilon)
     self.runEpoch(interactions, epochSize)
     self.task.flushStats()
     self.doValidation(epoch)
     s = cu.toc('Epoch done in ',s)
     epoch += 1
   maxEpochs = config.geti('exploitLearningEpochs') + exEpochs + egEpochs
   while epoch <= maxEpochs:
     s = cu.tic()
     print 'Epoch',epoch,'(exploitation mode: epsilon={:5.3f})'.format(epsilon)
     self.runEpoch(interactions, epochSize)
     self.task.flushStats()
     self.doValidation(epoch)
     s = cu.toc('Epoch done in ',s)
     shutil.copy(networkFile, networkFile + '.' + str(epoch))
     epoch += 1
Example #39
 def runEpoch(self, interactions, maxImgs):
     img = 0
     s = cu.tic()
     while img < maxImgs:
         k = 0
         while not self.environment.episodeDone and k < interactions:
             self.experiment._oneInteraction()
             k += 1
         self.agent.learn()
         self.agent.reset()
         self.environment.loadNextEpisode()
         img += 1
     s = cu.toc('Run epoch with ' + str(maxImgs) + ' episodes', s)
Example #40
 def runEpoch(self, interactions, maxImgs):
   img = 0
   s = cu.tic()
   while img < maxImgs:
     k = 0
     while not self.environment.episodeDone and k < interactions:
       self.experiment._oneInteraction()
       k += 1
     self.agent.learn()
     self.agent.reset()
     self.environment.loadNextEpisode()
     img += 1
   s = cu.toc('Run epoch with ' + str(maxImgs) + ' episodes', s)
Example #41
 def make_mmap(self, files):
     t = tic()
     print('Memory mapping current file...', end=' ')
     self.memmap = cm.save_memmap(files,
                                  base_name=f'MAP{self.fnumber}a',
                                  order='C',
                                  slices=[
                                      slice(0, -1,
                                            self.channels * self.planes),
                                      slice(0, 512),
                                      slice(self.x_start, self.x_end)
                                  ])
     print(f'done. Took {toc(t):.4f}s')
Example #42
def mainLoop(modelType,modelArgs,positives,trueObjectBoxes,trainingList,featuresDir,featuresExt,modelOut,maxNegOverlap,iter):
  pos,posIdx,ari,osi = positives
  startTime = cu.tic()
  if iter == 0:
    ## Random Negatives
    print ' >>> RANDOM NEGATIVES'
    neg,negIdx = learn.getRandomNegs(featuresDir,trainingList,featuresExt,pos.shape[1],maxVectorsCache,maxNegativeImages)
    detectionsList = [ [x[0],'0.0']+x[1:]+['1'] for x in negIdx]
    hards = {'features':np.zeros((0,neg.shape[1])),'index':[]}
    lap = cu.toc('Random negatives matrix ('+str(neg.shape[0])+' instances)',startTime)
  else:
    ## Mine hard negatives
    print ' >>> MINING HARD NEGATIVES'
    model = det.createDetector(modelType,modelArgs)
    model.load(modelOut+'.'+ str( iter-1 ))
    detectionsList = detector.detectObjects(model,trainingList,featuresDir,featuresExt,0.3,-10.0)
    hards = cu.loadMatrixNoCompression(modelOut+'.hards').item()
    lap = cu.toc('Hard negatives matrix ('+str(hards['features'].shape[0])+' instances)',startTime)

  ## Rank and clean negative detections
  detectionsData = evaluation.loadDetections(detectionsList)
  groundTruth = evaluation.loadGroundTruthAnnotations(trueObjectBoxes)
  detectionsLog = evaluation.evaluateDetections(groundTruth,detectionsData,0.5,allowDuplicates=True) # overlapMeasure=validRegion,
  evaluation.computePrecisionRecall(len(posIdx),detectionsLog['tp'],detectionsLog['fp'],'tmp.txt')
  evaluation.computePrecAt(detectionsLog['tp'],[20,50,100,200,300,400,500])
  logData = learn.parseRankedDetectionsFile(detectionsLog['log'],maxNegOverlap,maxNegativeVectors)
  print ' >>> LOADING HARD NEGATIVES'
  neg,negIdx = learn.loadHardNegativesFromList(featuresDir,logData['negExamples'],featuresExt,pos.shape[1],logData['negTaken'])
  del(detectionsList,detectionsData,detectionsLog,logData)
  lap = cu.toc('Ranked negatives matrix ('+str(neg.shape[0])+' instances)',lap)
  neg = np.concatenate( (neg,hards['features']) )
  negIdx = negIdx + hards['index']

  ## Learn Detector
  clf = det.createDetector(modelType,modelArgs)
  clf.learn(pos,neg,posIdx,negIdx)
  clf.save(modelOut+'.'+str(iter))
  lap = cu.toc('Classifier learned:',lap)

  ## Keep hard negatives for next iterations
  scores = clf.predict(neg,negIdx)
  hardNegsIdx = np.argsort(scores)
  hardNeg = np.concatenate( (hards['features'], neg[hardNegsIdx[-cu.topHards:]]) )
  negIdx = hards['index'] + [negIdx[j] for j in hardNegsIdx[-cu.topHards:]]
  print 'Hard negatives:',hardNeg.shape[0]
  hards = {'features':hardNeg, 'index':negIdx}
  cu.saveMatrixNoCompression({'features':hardNeg,'index':negIdx},modelOut+'.hards')

  print ' ** Iteration',iter,'done'
  return {'detector':clf,'pos':pos,'posIdx':posIdx,'neg':neg,'negIdx':negIdx}
Example #43
def computeFeatures(batch, n, data, net, layers, output):
  startTime = tic()
  allFeat = {}
  for l in layers.keys():
    allFeat[l] = emptyMatrix([n,layers[l]['dim']])
  # Extract and store CNN Features
  outputBlobs = [np.empty((batch, 1000, 1, 1), dtype=np.float32)]
  for i in range(batch,n+batch,batch):
    inputBlobs = np.empty((batch, 3, 227, 227), dtype=np.float32)
    start = i-batch
    finish = min(i,n)
    elems = finish-start
    inputBlobs[0:elems,:,:,:] = data[start:finish,:,:,:]
    net.caffenet.Forward([inputBlobs], outputBlobs)
    outputs =  net.caffenet.blobs()
    for l in layers.keys():
      allFeat[l][start:finish,:] = outputs[layers[l]['idx']].data[0:elems,:,:,:].reshape([elems,layers[l]['dim']])
  # Save files for this image
  for l in layers.keys():
    saveMatrix(allFeat[l][0:n,:],output+'.'+l)
  lap = toc('Image ready with '+str(n)+' boxes in:',startTime)
Example #44
if __name__ == "__main__":
    import matplotlib.pyplot as plt
    from plotlib import plot_phasedist

    dim = 3

    M = np.random.randn(2 * dim, 2 * dim)
    M += M.T.copy()
    for i in np.arange(M.shape[0] // 2):
        s = M[2 * i, 2 * i] + M[2 * i + 1, 2 * i + 1]
        M[2 * i, 2 * i] -= s / 2
        M[2 * i + 1, 2 * i + 1] -= s / 2

    tic("sampling")
    nsamples = 10 ** 4
    burnin = 10 ** 3
    lf_steps = 50
    step_sz = 0.15
    phi, E, diagn = sample_hmc(M, nsamples, burnin, lf_steps, step_sz, diagnostics=True)
    toc("sampling")

    tic("fiting")
    M_hat = fit_model(phi)
    Mneg_hat, Mpos_hat = m2kappa(M_hat)
    # anti-symmetrize diagonal elements for estimation matrix
    for i in np.arange(M_hat.shape[0] // 2):
        s = M_hat[2 * i, 2 * i] + M_hat[2 * i + 1, 2 * i + 1]
        M_hat[2 * i, 2 * i] -= s / 2
        M_hat[2 * i + 1, 2 * i + 1] -= s / 2
Example #45
    saveMatrix(allFeat[l][0:n,:],output+'.'+l)
  lap = toc('GPU is done with '+str(n)+' boxes in:',startTime)

def emptyMatrix(size):
  data = np.zeros(size)
  return data.astype(np.float32)

def saveMatrix(matrix,outFile):
  outf = open(outFile,'wb')
  np.savez_compressed(outf,matrix)
  outf.close()

#################################
# Extract Features
#################################
startTime = tic()
totalItems = len(images)
layers = {'fc6_neuron_cudanet_out': {'dim':4096,'idx':'fc6'}, 'fc7_neuron_cudanet_out': {'dim':4096,'idx':'fc7'}}
batch = 50

print 'Extracting features for',totalItems,'total images'
for name in images:
  # Check if files already exist
  processed = 0
  for l in layers.keys():
    if os.path.isfile(outDir+'/'+name+'.'+l):
      processed += 1
  if processed == len(layers):
    continue
  # Get features for patches
  indexFile = open(outDir+'/'+name+'.idx','w')
Example #46
import utils as cu
import libDetection as det
from dataProcessor import processData

class Checker():
  def __init__(self):
    print 'Starting checker'

  def run(self,img,features,bboxes):
    return img,features.shape[0] == len(bboxes)

## Main Program Parameters
params = cu.loadParams("testImageList featuresDir featuresExt")

imageList = [x.replace('\n','') for x in open(params['testImageList'])]
## Run Detector
task = Checker()
start = cu.tic()
result = processData(imageList,params['featuresDir'],params['featuresExt'],task)
cu.toc('All images checked',start)
totalP = 0
for data in result:
  img,r = data
  if not r:
    print 'Problems with',img
    totalP += 1
print 'Total problems:',totalP
Example #47
  def __init__(self, boxes):
    t = cu.tic()
    self.auxBoxes = []
    frame = [999,999,0,0]
    id = 0
    for box in boxes.tolist():
      frame = min(frame[0:2], box[0:2]) + max(frame[2:],box[2:])
      self.auxBoxes.append(Box(box, id))
      id += 1
    self.frame = map(int,frame)
    self.auxBoxes.sort(key=lambda x:x.area, reverse=True)
    self.adjacency = np.zeros( (len(self.auxBoxes),len(self.auxBoxes)) )
    for i in range(len(self.auxBoxes)):
      self.adjacency[i,i] = 1.0
      for j in range(i+1,len(self.auxBoxes)):
        iou = self.auxBoxes[i].IoU( self.auxBoxes[j] )
        self.adjacency[i,j] = iou
        self.adjacency[j,i] = iou
    
    knn = range(-2,-GRAPH_NEIGHBORS-2,-1) # Avoid last element (same box)
    self.graph = {'nodes':[], 'edges':[]}
    for i in range(len(self.auxBoxes)):
      center = self.auxBoxes[i].center()
      node = {'data':{'id':str(i), 'box':self.auxBoxes[i].box}, 
               'position':{'x':int(center[0]),'y':int(center[1])}}
      self.graph['nodes'].append(node)
      edges = {}
      neighbors = np.argsort( self.adjacency[i,:] )
      knn =  [ (j,self.adjacency[i,j]) for j in neighbors[-NEAREST_NEIGHBORS-2:-2] ]
      for j,iou in knn:
        edge = { 'data': { 'id': str(i)+'_'+str(j), 'weight': iou, 'source': str(i), 'target': str(j) } }
        self.graph['edges'].append(edge)
    
    t = cu.toc('Graph construction:',t)
    return 
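    # NOTE: everything below this early return is unreachable; the layout code was left disabled in the source.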

    self.layout = []
    scaleRange = len(auxBoxes)/SCALES
    for s in range(SCALES):
      scaleElems = []
      self.layout.append([])
      for i in range(scaleRange):
        scaleElems.append(auxBoxes[scaleRange*s + i])
      scaleElems.sort(key=lambda x:x.box[0], reverse=True)
      horizontalRange = len(scaleElems)/HORIZONTAL_BINS
      for h in range(HORIZONTAL_BINS):
        horizontalRangeElems = []
        self.layout[s].append([])
        for j in range(horizontalRange):
          horizontalRangeElems.append(scaleElems[horizontalRange*h + j])
        horizontalRangeElems.sort(key=lambda x:x.box[1], reverse=True)
        verticalRange = len(horizontalRangeElems)/VERTICAL_BINS
        for v in range(VERTICAL_BINS):
          self.layout[s][h].append([])
          for k in range(verticalRange):
            self.layout[s][h][v].append(horizontalRangeElems[verticalRange*v + k])
    self.numBoxes = len(auxBoxes)
    self.boxesPerBin = float(self.numBoxes)/WORLD_SIZE
    self.actionCounter = 0
    self.scale = SCALES/2 # (Greedy: Set to zero)
    self.horizontal = 0
    self.vertical = 0
    self.percentExplored = 0
    self.selectedIds = []
    self.status = np.zeros( (SCALES, HORIZONTAL_BINS, VERTICAL_BINS), dtype=np.int32 )
    self.currentPosition = np.zeros( (SCALES, HORIZONTAL_BINS, VERTICAL_BINS), dtype=np.int32 )
Example #48
      for i in range(len(self.imgBoxes[img])):
        box = map( int, self.imgBoxes[img][i,:].tolist() )
        key = img + ' ' + ' '.join( map(str, box) )
        try: score = records[key]
        except: score = -10.0
        self.scores[img][i,fileIdx] = score

  def saveDB(self, outputDir):
    for img in self.imgBoxes.keys():
      data = {'boxes':self.imgBoxes[img], 'scores':self.scores[img]}
      scipy.io.savemat(outputDir+'/'+img+'.mat', data, do_compression=True)
    out = open(outputDir+'/categories.txt','w')
    for c in self.categories:
      out.write(c + '\n')
    out.close()

if __name__ == "__main__":
  params = cu.loadParams('scoresDirectory proposalsFile outputDir')
  cu.mem('Program started')
  lap = tic()
  builder = DBBuilder(params['scoresDirectory'], params['proposalsFile'])
  lap = toc('Proposals loaded', lap)
  cu.mem('DB initialized')
  builder.parseDir()
  lap = toc('Directory parsed', lap)
  cu.mem('All files read')
  builder.saveDB(params['outputDir'])
  lap = toc('Database saved', lap)
  cu.mem('Program ends')

Example #49
def crank(V, L1, R1x, L2, R2x, dt, n, crumbs=None, callback=None):
    if crumbs is None:  # avoid sharing a mutable default list across calls
        crumbs = []
    V = V.copy()
    theta = 0.5
    # dt *= 0.5

    # L1e = flatten_tensor(L1)
    L1e = L1.copy()
    L1i = L1e.copy()
    R1 = np.array(R1x).T

    # L2e = flatten_tensor(L2)
    L2e = L2.copy()
    L2i = L2e.copy()
    R2 = np.array(R2x)

    # print "L var"
    # fp(L2e.data, 2)
    # print "FD op var"
    # fp(F.operators[1].data, 2)

    # print "diff"
    # fp(F.operators[1].data - L2e.data, 2, 'f')

    # assert np.allclose(F.operators[1].data, L2e.data)

    m = 2

    # L  = (As + Ass - H.interest_rate*np.eye(nspots))*-dt + np.eye(nspots)

    L1i.data *= -theta*dt
    L1i.data[m, :] += 1
    # R1 *= dt

    L2i.data *= -theta*dt
    L2i.data[m, :] += 1
    # R2 *= dt

    offsets1 = (abs(min(L1i.offsets)), abs(max(L1i.offsets)))
    offsets2 = (abs(min(L2i.offsets)), abs(max(L2i.offsets)))

    dx = np.gradient(spots)[:,np.newaxis]
    dy = np.gradient(vars)
    X, Y = [dim.T for dim in np.meshgrid(spots, vars)]
    gradgrid = dt * coeffs[(0,1)](0, X, Y) / (dx*dy)
    gradgrid[:,0] = 0; gradgrid[:,-1] = 0
    gradgrid[0,:] = 0; gradgrid[-1,:] = 0

    print_step = max(1, int(n / 10))
    to_percent = 100.0 / n
    utils.tic("Crank:")
    R = R1 + R2
    normal_shape = V.shape
    transposed_shape = normal_shape[::-1]
    for k in xrange(n):
        if not k % print_step:
            if np.isnan(V).any():
                print "Crank fail @ t = %f (%i steps)" % (dt * k, k)
                return crumbs
            print int(k * to_percent),
        if callback is not None:
            callback(V, ((n - k) * dt))

        Vsv = np.gradient(np.gradient(V)[0])[1] * gradgrid

        # V12 = (V
                # + Vsv
                # + (1-theta)*dt*L1e.dot(V.T.flat).reshape(transposed_shape).T
                # + (1-theta)*dt*L2e.dot(V.flat).reshape(normal_shape)
                # + dt * R)

        # V1 = spl.solve_banded(offsets2, L2i.data, V12.flat, overwrite_b=True).reshape(normal_shape)
        # V  = spl.solve_banded(offsets1, L1i.data, V1.T.flat, overwrite_b=True).reshape(transposed_shape).T

        V1 = (L1e.dot(V.T.flat).reshape(transposed_shape)).T
        V2 = (L2e.dot(V.flat).reshape(normal_shape))
        Y0 = V + Vsv + dt*(V1 + V2 + R)

        V1 = Y0 - theta * dt * L1e.dot(V.T.flat).reshape(transposed_shape).T
        Y1 = spl.solve_banded(offsets1, L1i.data, V1.T.flat, overwrite_b=True).reshape(transposed_shape).T

        V2 = Y1 - theta * dt * L2e.dot(V.flat).reshape(normal_shape)
        Y2 = spl.solve_banded(offsets2, L2i.data, V2.flat, overwrite_b=True).reshape(normal_shape)
        V = Y2


        crumbs.append(V.copy())
    utils.toc()
    return crumbs
Example #50
def init(spots, nvols, k):
    return tile(np.maximum(0, spots - k), (nvols, 1)).T


V_init = init(spots, nvols, k)
V = np.copy(V_init)
# bs, delta = [x for x in bs_call_delta(spots[:, newaxis], k, r,
                                      # np.sqrt(vars)[newaxis, :], t)]

bs = BlackScholesOption(spot=spots[:, np.newaxis],
                        strike=k,
                        interest_rate=r,
                        variance=vars[np.newaxis, :],
                        tenor=t).analytical
utils.tic("Heston Analytical:")
# hss = array([hs_call(spots, k, r, np.sqrt(vars),
             # dt*i, kappa, theta, sigma, rho) for i in range(int(t/dt)+1)])
# hs = hss[-1]
hs = hs_call_vector(spots, k, r, np.sqrt(vars),
             t, kappa, theta, sigma, rho)
utils.toc()
hs[isnan(hs)] = 0.0
if max(hs.flat) > spots[-1] * 2:
    BADANALYTICAL = True
    print "Warning: Analytical solution looks like trash."

if len(sys.argv) > 1:
    if sys.argv[1] == '0':
        print "Bail out with arg 0."
        sys.exit()
Example #51
    else:
        return pol

if __name__ == '__main__':
    import matplotlib.pyplot as plt
    from plotlib import plot_phasedist
    dim = 3

    M = np.random.randn(2*dim,2*dim)
    M += M.T.copy()
    for i in np.arange(M.shape[0]/2):
        s = M[2*i,2*i] + M[2*i+1,2*i+1]
        M[2*i,2*i]     -= s/2
        M[2*i+1,2*i+1] -= s/2

    tic('sampling')
    nsamples = 10**4
    burnin = 10**3
    lf_steps = 50
    step_sz = .15
    phi,E,diagn = sample_hmc(M,nsamples,burnin,lf_steps,step_sz,diagnostics=True)
    toc('sampling')

    tic('fitting')
    M_hat = fit_model(phi)
    Mneg_hat,Mpos_hat= m2kappa(M_hat)
    # anti-symmetrize diagonal elements for estimation matrix
    for i in np.arange(M_hat.shape[0]/2):
        s = M_hat[2*i,2*i] + M_hat[2*i+1,2*i+1]
        M_hat[2*i,2*i]     -= s/2
        M_hat[2*i+1,2*i+1] -= s/2