Example #1
def copy_file_from_remote(file_path, main_hostname, username):
    """
    Copy file or folder from remote to local machine using scp. 
    This uses Paramiko's scp module and assumes that ssh keys between the two machines have been setup.
    @file_path: This should lead with the generic '~/' user sign. This makes the home path compatible
                between different machines
    @main_hostname: The name or ... of the remote machine
    @username: The username used for login to remote machine
    """
    raw_file_path = file_path
    local_file_path = os.path.expanduser(file_path)
    # if not os.path.isfile(local_file_path+filename) and main_hostname != 'localhost':
    local_parent_dir, filename = ntpath.split(local_file_path)
    if not os.path.isdir(local_parent_dir):
        print('Path to parent directory %s does not exist, creating dir.' %
              local_parent_dir)
        os.makedirs(local_parent_dir)
    print('Copying data from %s via scp...' % main_hostname)
    tic()
    # copy the data folder (and contents) from the remote machine to the parent directory on the local machine
    # parent_dir = os.path.dirname(os.path.dirname(self.local_file_path))
    # local_parent_dir = os.path.dirname(local_file_path)
    # ssh.scp_get(username, main_hostname, raw_file_path, os.path.join(file_path, os.pardir), recursive=True)
    scp_get(username,
            main_hostname,
            raw_file_path,
            local_parent_dir,
            recursive=True)
    print('Completed copying data from main host.')
    toc()
    return
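Note: every example on this page leans on a small tic()/toc() timer pair (as bare tic/toc, utils.tic/utils.toc, or cu.tic/cu.toc). The actual helpers differ from project to project; below is a minimal sketch of the common pattern, assuming a time.time()-based stopwatch keyed by label. This is a hypothetical stand-in, not any project's real module.

import time

_starts = {}

def tic(label='default'):
    """Start a named timer and return its start time."""
    _starts[label] = time.time()
    return _starts[label]

def toc(label='default', start=None):
    """Print and return the seconds elapsed since the matching tic()."""
    if start is None:
        start = _starts.get(label, time.time())
    elapsed = time.time() - start
    print('%s: %.3f s' % (label, elapsed))
    return elapsed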
Example #2
def fit_model(phi, eps=0.):
    assert phi.ndim == 2, 'data has to be two-dimensional'
    assert phi.shape[1] > phi.shape[0], 'data samples have to be in columns'    
    d, nsamples = phi.shape
    nij = d**2-d # number of coupling terms

    adata, arow, acol, b = fill_model_matrix(phi)
    a = sparse.coo_matrix((adata,(arow,acol)), (nij,nij))

    tic('matrix inversion')
    if eps > 0:
        a2 = np.dot(a.T,a) + eps*nsamples*sparse.eye(nij,nij,format='coo')
        b2 = np.dot(a.todense().T,np.atleast_2d(b).T)
        # NOTE: this sparse multiplication is buggy; the shape of b2 does not come out as (b.size,)
        b3 = b2.copy().flatten().T
        b3.shape = (b3.size,)
        k_vec = dsolve.spsolve(a2.tocsr(),b3)
        k_mat = np.zeros((d,d),complex)
        k_mat.T[np.where(np.diag(np.ones(d))-1)] = k_vec.ravel()
    else:
        k_vec = dsolve.spsolve(a.tocsr(),b)
        k_mat = np.zeros((d,d),complex)
        k_mat.T[np.where(np.diag(np.ones(d))-1)] = k_vec
    toc('matrix inversion')
    return k_mat
Example #3
def fit_model(phi, eps=0.):
    assert phi.ndim == 2, 'data has to be two-dimensional'
    assert phi.shape[1] > phi.shape[0], 'data samples have to be in columns'
    d, nsamples = phi.shape
    nij = d**2 - d  # number of coupling terms

    adata, arow, acol, b = fill_model_matrix(phi)
    a = sparse.coo_matrix((adata, (arow, acol)), (nij, nij))

    tic('matrix inversion')
    if eps > 0:
        a2 = np.dot(a.T,
                    a) + eps * nsamples * sparse.eye(nij, nij, format='coo')
        b2 = np.dot(a.todense().T, np.atleast_2d(b).T)
        # NOTE: this sparse multiplication is buggy; the shape of b2 does not come out as (b.size,)
        b3 = b2.copy().flatten().T
        b3.shape = (b3.size, )
        k_vec = dsolve.spsolve(a2.tocsr(), b3)
        k_mat = np.zeros((d, d), complex)
        k_mat.T[np.where(np.diag(np.ones(d)) - 1)] = k_vec.ravel()
    else:
        k_vec = dsolve.spsolve(a.tocsr(), b)
        k_mat = np.zeros((d, d), complex)
        k_mat.T[np.where(np.diag(np.ones(d)) - 1)] = k_vec
    toc('matrix inversion')
    return k_mat
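For eps > 0, fit_model replaces the plain system a*k = b with the Tikhonov-regularized normal equations (A^T A + eps*nsamples*I) k = A^T b. A standalone sketch of just that solve on a small random system, staying fully sparse (illustrative shapes, not the model matrix built above):

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import spsolve

nij, nsamples, eps = 6, 100, 1e-3
a = sparse.random(nij, nij, density=0.5, format='csr', random_state=0)
b = np.ones(nij)
lhs = (a.T.dot(a) + eps * nsamples * sparse.eye(nij)).tocsr()  # A^T A + eps*N*I
rhs = a.T.dot(b)                                               # A^T b
k_vec = spsolve(lhs, rhs)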
Example #4
def mainLoop(modelType, modelArgs, positives, trainingList, featuresDir,
             featuresExt, modelOut, maxNegOverlap, iter):
    pos, posIdx, featSize, fmSize = positives
    featureSpace = pos.shape[1]
    startTime = cu.tic()
    if iter == 0:
        ## Random Negatives
        print ' >>> RANDOM NEGATIVES'
        N, negIdx = learn.getRandomNegs(featuresDir, trainingList, featuresExt,
                                        featSize, maxVectorsCache,
                                        maxNegativeImages)
        cellsPerImage = featSize / featureSpace
        N = N.reshape((N.shape[0], featureSpace, fmSize,
                       fmSize))  # Recover original feature layout
        neg = np.zeros((cellsPerImage * N.shape[0], featureSpace))
        for i in range(N.shape[0]):
            neg[i * cellsPerImage:(i + 1) * cellsPerImage] = N[i].T.reshape(
                (cellsPerImage, featureSpace))  # Unfold features
        hards = {'features': np.zeros((0, neg.shape[1])), 'index': []}
        lap = cu.toc(
            'Random negatives matrix (' + str(neg.shape[0]) + ' instances)',
            startTime)
    else:
        ## Mine hard negatives
        print ' >>> MINING HARD NEGATIVES'
        model = det.createDetector(modelType, modelArgs)
        model.load(modelOut + '.' + str(iter - 1))
        detList, detMatrix = maskDetector.detectObjects(
            model, trainingList, featuresDir, featuresExt, -10.0)
        hdnList, detMatrix = maskDetector.selectHardNegatives(
            detList, detMatrix, posIdx, maxNegativeVectors)
        neg = maskDetector.loadHardNegativesFromMatrix(featuresDir, hdnList,
                                                       detMatrix, featuresExt,
                                                       featureSpace,
                                                       maxNegativeVectors)
        hards = cu.loadMatrixNoCompression(modelOut + '.hards').item()
        lap = cu.toc(
            'Hard negatives (' + str(neg.shape[0]) + ' mined + ' +
            str(hards['features'].shape[0]) + ' previous instances)',
            startTime)

    ## Learn Detector
    neg = np.concatenate((neg, hards['features']))
    clf = det.createDetector(modelType, modelArgs)
    clf.learn(pos, neg)
    clf.save(modelOut + '.' + str(iter))
    lap = cu.toc('Classifier learned:', lap)

    ## Keep hard negatives for next iterations
    scores = clf.predict(neg)
    hardNegsIdx = np.argsort(scores)
    hardNeg = np.concatenate(
        (hards['features'], neg[hardNegsIdx[-cu.topHards:]]))
    cu.saveMatrixNoCompression({'features': hardNeg}, modelOut + '.hards')
    print ' ** Iteration', iter, 'done'
Example #5
def crank(V, L1, R1x, L2, R2x, dt, n, crumbs=[], callback=None):
    V = V.copy()
    dt *= 0.5

    L1e = flatten_tensor(L1)
    L1i = L1e.copy()
    R1 = np.array(R1x).T

    L2e = flatten_tensor(L2)
    L2i = L2e.copy()
    R2 = np.array(R2x)

    m = 2

    # L  = (As + Ass - r*np.eye(nspots))*-dt + np.eye(nspots)
    L1e.data *= dt
    L1e.data[m, :] += 1
    L1i.data *= -dt
    L1i.data[m, :] += 1
    R1 *= dt

    L2e.data *= dt
    L2e.data[m, :] += 1
    L2i.data *= -dt
    L2i.data[m, :] += 1
    R2 *= dt

    offsets1 = (abs(min(L1i.offsets)), abs(max(L1i.offsets)))
    offsets2 = (abs(min(L2i.offsets)), abs(max(L2i.offsets)))

    print_step = max(1, int(n / 10))
    to_percent = 100.0 / n
    utils.tic("Crank:")
    R = R1 + R2
    normal_shape = V.shape
    transposed_shape = normal_shape[::-1]
    for k in xrange(n):
        if not k % print_step:
            if isnan(V).any():
                print "Crank fail @ t = %f (%i steps)" % (dt * k, k)
                return crumbs
            print int(k * to_percent),
        if callback is not None:
            callback(V, ((n - k) * dt))
        V = (L2e.dot(V.flat).reshape(normal_shape) + R).T
        V = spl.solve_banded(offsets1, L1i.data, V.flat, overwrite_b=True)
        V = (L1e.dot(V).reshape(transposed_shape).T) + R
        V = spl.solve_banded(offsets2, L2i.data, V.flat,
                             overwrite_b=True).reshape(normal_shape)
        crumbs.append(V.copy())
    utils.toc()
    return crumbs
Example #6
def crank(V, L1, R1x, L2, R2x, dt, n, crumbs=[], callback=None):
    V = V.copy()
    dt *= 0.5

    L1e = flatten_tensor(L1)
    L1i = L1e.copy()
    R1 = np.array(R1x).T

    L2e = flatten_tensor(L2)
    L2i = L2e.copy()
    R2 = np.array(R2x)

    m = 2

    # L  = (As + Ass - r*np.eye(nspots))*-dt + np.eye(nspots)
    L1e.data *= dt
    L1e.data[m, :] += 1
    L1i.data *= -dt
    L1i.data[m, :] += 1
    R1 *= dt

    L2e.data *= dt
    L2e.data[m, :] += 1
    L2i.data *= -dt
    L2i.data[m, :] += 1
    R2 *= dt

    offsets1 = (abs(min(L1i.offsets)), abs(max(L1i.offsets)))
    offsets2 = (abs(min(L2i.offsets)), abs(max(L2i.offsets)))

    print_step = max(1, int(n / 10))
    to_percent = 100.0 / n
    utils.tic("Crank:")
    R = R1 + R2
    normal_shape = V.shape
    transposed_shape = normal_shape[::-1]
    for k in xrange(n):
        if not k % print_step:
            if isnan(V).any():
                print "Crank fail @ t = %f (%i steps)" % (dt * k, k)
                return crumbs
            print int(k * to_percent),
        if callback is not None:
            callback(V, ((n - k) * dt))
        V = (L2e.dot(V.flat).reshape(normal_shape) + R).T
        V = spl.solve_banded(offsets1, L1i.data, V.flat, overwrite_b=True)
        V = (L1e.dot(V).reshape(transposed_shape).T) + R
        V = spl.solve_banded(offsets2, L2i.data, V.flat, overwrite_b=True).reshape(normal_shape)
        crumbs.append(V.copy())
    utils.toc()
    return crumbs
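Both crank() and impl() below advance each dimension implicitly with scipy.linalg.solve_banded, which expects the operator in diagonal-ordered form and the (lower, upper) bandwidths as its first argument. A minimal standalone sketch on a tridiagonal system (illustrative values only):

import numpy as np
import scipy.linalg as spl

n = 5
ab = np.zeros((3, n))      # diagonal-ordered form: super-, main, sub-diagonal
ab[0, 1:] = -1.0           # superdiagonal (first entry unused)
ab[1, :] = 2.0             # main diagonal
ab[2, :-1] = -1.0          # subdiagonal (last entry unused)
rhs = np.ones(n)
x = spl.solve_banded((1, 1), ab, rhs)   # one band below, one above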
Example #7
def impl(V, L1, R1x, L2, R2x, dt, n, crumbs=[], callback=None):
    V = V.copy()

    # L1i = flatten_tensor(L1)
    L1i = L1.copy()
    R1 = np.array(R1x).T

    # L2i = flatten_tensor(L2)
    L2i = L2.copy()
    R2 = np.array(R2x)

    m = 2

    # L  = (As + Ass - H.interest_rate*np.eye(nspots))*-dt + np.eye(nspots)
    L1i.data *= -dt
    L1i.data[m, :] += 1
    R1 *= dt

    L2i.data *= -dt
    L2i.data[m, :] += 1
    R2 *= dt

    offsets1 = (abs(min(L1i.offsets)), abs(max(L1i.offsets)))
    offsets2 = (abs(min(L2i.offsets)), abs(max(L2i.offsets)))

    dx = np.gradient(spots)[:,np.newaxis]
    dy = np.gradient(vars)
    X, Y = [dim.T for dim in np.meshgrid(spots, vars)]
    gradgrid = dt * coeffs[(0,1)](0, X, Y) / (dx * dy)
    gradgrid[:,0] = 0; gradgrid[:,-1] = 0
    gradgrid[0,:] = 0; gradgrid[-1,:] = 0

    print_step = max(1, int(n / 10))
    to_percent = 100.0 / n
    utils.tic("Impl:")
    for k in xrange(n):
        if not k % print_step:
            if np.isnan(V).any():
                print "Impl fail @ t = %f (%i steps)" % (dt * k, k)
                return crumbs
            print int(k * to_percent),
        if callback is not None:
            callback(V, ((n - k) * dt))
        Vsv = np.gradient(np.gradient(V)[0])[1] * gradgrid
        V = spl.solve_banded(offsets2, L2i.data,
                             (V + Vsv + R2).flat, overwrite_b=True).reshape(V.shape)
        V = spl.solve_banded(offsets1, L1i.data,
                             (V + R1).T.flat, overwrite_b=True).reshape(V.shape[::-1]).T
    crumbs.append(V.copy())
    utils.toc()
    return crumbs
Example #8
File: NN.py Project: Orenmc/NN
    def train(self):
        data = self.train_data
        labels = self.train_labels
        for epoch in range(self.epochs):
            utils.tic()
            total_loss = 0.0  # every epoch loss should start with zero
            good = 0.0
            total_size = 0.0
            # TODO: shuffle?
            data, labels = utils.shuffle(data, labels)
            for d, l in zip(data, labels):
                total_size += 1
                pred, cache = self.fprop(d)
                # check the prediction
                y_hat = np.argmax(pred)
                if y_hat == l:
                    good += 1

                err_cost = float(pred[int(l)])  # loss = -1 * log(err_cost)

                cross_entropy = utils.cross_entropy_loss(err_cost)
                if self.L2:
                    cross_entropy += utils.L2_cost(self.parameters["W"],
                                                   self.L2)
                total_loss += cross_entropy

                grads = self.bprop(cache, d, l)
                self.weights_updates(grads)

            print('epoch {}:'.format(epoch + 1))
            acc = good * 100 / total_size
            train_acc.append(acc)
            avg_loss = total_loss / total_size
            train_loss.append(avg_loss)

            print('train accuracy: {:2.2f}%'.format(acc))
            print('train AVG loss: {:2.2f}'.format(avg_loss))

            self.validation_acc()
            print('time:')
            utils.toc()
            # end of epoch
        # cache all about model
        trained_model = {
            "norm": self.norm,
            "parameters": self.parameters,
            "lr": self.lr
        }
        directory = str(len(self.hidden)) + 'Hidden/L2/'
        np.save(directory + 'model_' + self.model_name, trained_model)
        self.printGraph(directory)
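train() assumes helpers such as utils.cross_entropy_loss and utils.L2_cost. Hypothetical one-liners consistent with how they are called above (assumptions about utils, not the project's actual code):

import numpy as np

def cross_entropy_loss(p_correct):
    # loss = -log(probability the model assigned to the true class)
    return -np.log(max(p_correct, 1e-12))

def L2_cost(weights, l2):
    # quadratic penalty summed over a list of weight matrices (assumed layout)
    return 0.5 * l2 * sum(np.sum(w ** 2) for w in weights)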
Example #9
def mainLoop(modelType,modelArgs,positives,trueObjectBoxes,trainingList,featuresDir,featuresExt,modelOut,maxNegOverlap,iter):
  pos,posIdx,ari,osi = positives
  startTime = cu.tic()
  if iter == 0:
    ## Random Negatives
    print ' >>> RANDOM NEGATIVES'
    neg,negIdx = learn.getRandomNegs(featuresDir,trainingList,featuresExt,pos.shape[1],maxVectorsCache,maxNegativeImages)
    detectionsList = [ [x[0],'0.0']+x[1:]+['1'] for x in negIdx]
    hards = {'features':np.zeros((0,neg.shape[1])),'index':[]}
    lap = cu.toc('Random negatives matrix ('+str(neg.shape[0])+' instances)',startTime)
  else:
    ## Mine hard negatives
    print ' >>> MINING HARD NEGATIVES'
    model = det.createDetector(modelType,modelArgs)
    model.load(modelOut+'.'+ str( iter-1 ))
    detectionsList = detector.detectObjects(model,trainingList,featuresDir,featuresExt,0.3,-10.0)
    hards = cu.loadMatrixNoCompression(modelOut+'.hards').item()
    lap = cu.toc('Hard negatives matrix ('+str(hards['features'].shape[0])+' instances)',startTime)

  ## Rank and clean negative detections
  detectionsData = evaluation.loadDetections(detectionsList)
  groundTruth = evaluation.loadGroundTruthAnnotations(trueObjectBoxes)
  detectionsLog = evaluation.evaluateDetections(groundTruth,detectionsData,0.5,allowDuplicates=True) # overlapMeasure=validRegion,
  evaluation.computePrecisionRecall(len(posIdx),detectionsLog['tp'],detectionsLog['fp'],'tmp.txt')
  evaluation.computePrecAt(detectionsLog['tp'],[20,50,100,200,300,400,500])
  logData = learn.parseRankedDetectionsFile(detectionsLog['log'],maxNegOverlap,maxNegativeVectors)
  print ' >>> LOADING HARD NEGATIVES'
  neg,negIdx = learn.loadHardNegativesFromList(featuresDir,logData['negExamples'],featuresExt,pos.shape[1],logData['negTaken'])
  del(detectionsList,detectionsData,detectionsLog,logData)
  lap = cu.toc('Ranked negatives matrix ('+str(neg.shape[0])+' instances)',lap)
  neg = np.concatenate( (neg,hards['features']) )
  negIdx = negIdx + hards['index']

  ## Learn Detector
  clf = det.createDetector(modelType,modelArgs)
  clf.learn(pos,neg,posIdx,negIdx)
  clf.save(modelOut+'.'+str(iter))
  lap = cu.toc('Classifier learned:',lap)

  ## Keep hard negatives for next iterations
  scores = clf.predict(neg,negIdx)
  hardNegsIdx = np.argsort(scores)
  hardNeg = np.concatenate( (hards['features'], neg[hardNegsIdx[-cu.topHards:]]) )
  negIdx = hards['index'] + [negIdx[j] for j in hardNegsIdx[-cu.topHards:]]
  print 'Hard negatives:',hardNeg.shape[0]
  hards = {'features':hardNeg, 'index':negIdx}
  cu.saveMatrixNoCompression({'features':hardNeg,'index':negIdx},modelOut+'.hards')

  print ' ** Iteration',iter,'done'
  return {'detector':clf,'pos':pos,'posIdx':posIdx,'neg':neg,'negIdx':negIdx}
Example #10
def processImg(info, filename, idx, batchSize, layers, output):
  startTime = tic()
  allFeat = {}
  n = len(info)
  for l in layers.keys():
    allFeat[l] = emptyMatrix([n,layers[l]['dim']])
  numBatches = (n + batchSize - 1) / batchSize
  # Write the index file
  [idx.write(b[4]) for b in info]
  # Prepare boxes, make sure that extra rows are added to fill the last batch
  boxes = [x[:-1] for x in info] + [ [0,0,0,0] for x in range(numBatches * batchSize - n) ]
  # Initialize the image
  net.caffenet.InitializeImage(filename, ImageNetMean)
  for k in range(numBatches):
    s,f = k*batchSize,(k+1)*batchSize
    e = batchSize if f <= n else n-s
    # Forward this batch
    net.caffenet.ForwardRegions(boxes[s:f])
    #outputBlobs = [ np.empty((batch, 1000, 1, 1), dtype=np.float32) ]
    #net.caffenet.ForwardRegions(boxes[s:f],filename, outputBlobs)
    #print outputBlobs[0][0].shape, np.argmax(outputBlobs[0][0]), np.max(outputBlobs[0][0])
    outputs =  net.caffenet.blobs()
    f = n if f > n else f
    # Collect outputs
    for l in layers.keys():
      allFeat[l][s:f,:] = outputs[layers[l]['idx']].data[0:e,:,:,:].reshape([e,layers[l]['dim']])
  # Release image data
  net.caffenet.ReleaseImageData()
  # Save files for this image
  for l in layers.keys():
    saveMatrix(allFeat[l][0:n,:],output+'.'+l)
  lap = toc('GPU is done with '+str(len(info))+' boxes in:',startTime)
Example #11
def processImg(info, filename, batchSize, layers):
  startTime = tic()
  allFeat = {}
  n = len(info)
  for l in layers.keys():
    allFeat[l] = emptyMatrix([n,layers[l]['dim']])
  numBatches = (n + batchSize - 1) / batchSize
  # Prepare boxes, make sure that extra rows are added to fill the last batch
  boxes = [x[:-1] for x in info] + [ [0,0,0,0] for x in range(numBatches * batchSize - n) ]
  # Initialize the image
  net.caffenet.InitializeImage(filename, IMG_DIM, ImageNetMean, CROP_SIZE)
  for k in range(numBatches):
    s,f = k*batchSize,(k+1)*batchSize
    e = batchSize if f <= n else n-s
    # Forward this batch
    net.caffenet.ForwardRegions(boxes[s:f],CONTEXT_PAD)
    outputs = net.caffenet.blobs
    f = n if f > n else f
    # Collect outputs
    for l in layers.keys():
      allFeat[l][s:f,:] = outputs[layers[l]['idx']].data[0:e,:,:,:].reshape([e,layers[l]['dim']])
  # Release image data
  net.caffenet.ReleaseImageData()
  # Return features of boxes for this image
  for l in layers.keys():
    allFeat[l] = allFeat[l][0:n,:]
  lap = toc('GPU is done with '+str(len(info))+' boxes in:',startTime)
  return allFeat
Example #12
def fit_gen_model(phi):
    assert phi.ndim == 2, 'data has to be two-dimensional'
    assert phi.shape[1] > phi.shape[0], 'data samples have to be in columns'
    d, nsamples = phi.shape
    nij = 4 * d**2  # number of coupling terms

    adata, arow, acol, b = fill_gen_model_matrix(phi)
    a = sparse.coo_matrix((adata, (arow, acol)), (nij, nij))

    tic('matrix inversion')
    m_vec, flag = isolve.cg(a.tocsr(), b)
    # print 'exit flag = ', flag
    assert flag == 0
    m = m_vec2mat(m_vec)
    toc('matrix inversion')
    return m
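fit_gen_model solves its sparse system with conjugate gradients; scipy's cg returns an info flag that is 0 on convergence, which the assert checks. A self-contained sketch of the same call on a toy symmetric positive-definite system:

import numpy as np
from scipy import sparse
from scipy.sparse.linalg import cg

n = 8
a = sparse.eye(n, format='csr') * 4.0   # toy SPD matrix
b = np.arange(n, dtype=float)
m_vec, flag = cg(a, b)
assert flag == 0                        # 0 means cg converged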
Example #13
def processImg(info, filename, idx, batchSize, layers):
    startTime = tic()
    allFeat = {}
    n = len(info)
    for l in layers.keys():
        allFeat[l] = emptyMatrix([n, layers[l]['dim']])
    numBatches = (n + batchSize - 1) / batchSize
    # Write the index file
    [idx.write(b[4]) for b in info]
    # Prepare boxes, make sure that extra rows are added to fill the last batch
    boxes = [x[:-1]
             for x in info] + [[0, 0, 0, 0]
                               for x in range(numBatches * batchSize - n)]
    # Initialize the image
    net.caffenet.InitializeImage(filename, IMG_DIM, ImageNetMean, CROP_SIZE)
    for k in range(numBatches):
        s, f = k * batchSize, (k + 1) * batchSize
        e = batchSize if f <= n else n - s
        # Forward this batch
        net.caffenet.ForwardRegions(boxes[s:f], CONTEXT_PAD)  #,filename)
        outputs = net.caffenet.blobs
        f = n if f > n else f
        # Collect outputs
        for l in layers.keys():
            allFeat[l][s:f, :] = outputs[layers[l]['idx']].data[
                0:e, :, :, :].reshape([e, layers[l]['dim']])
    # Release image data
    net.caffenet.ReleaseImageData()
    # Return features of boxes for this image
    for l in layers.keys():
        allFeat[l] = allFeat[l][0:n, :]
    lap = toc('GPU is done with ' + str(len(info)) + ' boxes in:', startTime)
    return allFeat
Example #14
def processImg(imgName, filename, idx, batchSize, layers, output):
  startTime = tic()
  # Initialize image and boxes
  dims = net.caffenet.InitializeImage(filename, IMG_DIM, ImageNetMean, CROP_SIZE)
  boxes = multiScaleBoxes(dims, CROP_SIZE)
  # Write index file
  [idx.write(imgName + ' ' + ' '.join(map(str,b)) + '\n') for b in boxes]
  #Prepare boxes, make sure that extra rows are added to fill the last batch
  allFeat = {}
  n = len(boxes)
  for l in layers.keys():
    allFeat[l] = emptyMatrix([n,layers[l]['dim']])
  numBatches = (n + batchSize - 1) / batchSize
  boxes += [ [0,0,0,0] for x in range(numBatches * batchSize - n) ]

  for k in range(numBatches):
    s,f = k*batchSize,(k+1)*batchSize
    e = batchSize if f <= n else n-s
    # Forward this batch
    net.caffenet.ForwardRegions(boxes[s:f],CONTEXT_PAD) #,filename)
    outputs =  net.caffenet.blobs
    f = n if f > n else f
    # Collect outputs
    for l in layers.keys():
      allFeat[l][s:f,:] = outputs[layers[l]['idx']].data[0:e,:,:,:].reshape([e,layers[l]['dim']])
  # Release image data
  net.caffenet.ReleaseImageData()
  # Save files for this image
  for l in layers.keys():
    saveMatrix(allFeat[l][0:n,:],output+'.'+l)
  lap = toc('GPU is done with '+str(n)+' boxes in:',startTime)
Example #15
def fit_gen_model(phi):
    assert phi.ndim == 2, 'data has to be two-dimensional'
    assert phi.shape[1] > phi.shape[0], 'data samples have to be in columns'
    d, nsamples = phi.shape
    nij = 4*d**2 # number of coupling terms

    adata, arow, acol, b = fill_gen_model_matrix(phi)
    a = sparse.coo_matrix((adata,(arow,acol)), (nij,nij))

    tic('matrix inversion')
    m_vec,flag = isolve.cg(a.tocsr(),b)
    # print 'exit flag = ', flag
    assert flag==0
    m = m_vec2mat(m_vec)
    toc('matrix inversion')
    return m
Example #16
def fill_model_matrix(phi):
    z = np.concatenate((np.exp(1j*phi),np.exp(-1j*phi)))
    d, nsamples = phi.shape
    z.shape = (2,d,nsamples)
    nij = d**2-d # number of coupling terms
    na = 4*d**3-10*d**2+6*d # upper bound for number of elements in sparse matrix
    adata = np.zeros(na,complex)
    arow = np.zeros(na,int)
    acol = np.zeros(na,int)
    b = np.zeros(nij,complex)

    tic('weave')
    weave.inline(phasemodel_code_blitz, ['z','adata','arow','acol','b'],
                 type_converters=weave.converters.blitz)
    toc('weave')
    return adata, arow, acol, b
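Note: weave.inline compiles the embedded C/C++ (here phasemodel_code_blitz, with the Blitz++ type converters) on first call and caches the binary, which is why the tic('weave')/toc('weave') pair brackets it. scipy.weave is Python-2-only and was removed in SciPy 0.19; Cython or numba are the usual modern replacements.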
Example #17
def processImg(imgName, filename, idx, batchSize, layers, output):
    startTime = tic()
    # Initialize image and boxes
    dims = net.caffenet.InitializeImage(filename, IMG_DIM, ImageNetMean,
                                        CROP_SIZE)
    boxes = multiScaleBoxes(dims, CROP_SIZE)
    # Write index file
    [idx.write(imgName + ' ' + ' '.join(map(str, b)) + '\n') for b in boxes]
    #Prepare boxes, make sure that extra rows are added to fill the last batch
    allFeat = {}
    n = len(boxes)
    for l in layers.keys():
        allFeat[l] = emptyMatrix([n, layers[l]['dim']])
    numBatches = (n + batchSize - 1) / batchSize
    boxes += [[0, 0, 0, 0] for x in range(numBatches * batchSize - n)]

    for k in range(numBatches):
        s, f = k * batchSize, (k + 1) * batchSize
        e = batchSize if f <= n else n - s
        # Forward this batch
        net.caffenet.ForwardRegions(boxes[s:f], CONTEXT_PAD)  #,filename)
        outputs = net.caffenet.blobs
        f = n if f > n else f
        # Collect outputs
        for l in layers.keys():
            allFeat[l][s:f, :] = outputs[layers[l]['idx']].data[
                0:e, :, :, :].reshape([e, layers[l]['dim']])
    # Release image data
    net.caffenet.ReleaseImageData()
    # Save files for this image
    for l in layers.keys():
        saveMatrix(allFeat[l][0:n, :], output + '.' + l)
    lap = toc('GPU is done with ' + str(n) + ' boxes in:', startTime)
Example #18
 def train(self):
     networkFile = config.get('networkDir') + config.get(
         'snapshotPrefix') + '_iter_' + config.get(
             'trainingIterationsPerBatch') + '.caffemodel'
     interactions = config.geti('trainInteractions')
     minEpsilon = config.getf('minTrainingEpsilon')
     epochSize = len(self.environment.imageList) / 1
     epsilon = 1.0
     self.controller.setEpsilonGreedy(epsilon,
                                      self.environment.sampleAction)
     epoch = 1
     exEpochs = config.geti('explorationEpochs')
     while epoch <= exEpochs:
         s = cu.tic()
         print 'Epoch', epoch, ': Exploration (epsilon=1.0)'
         self.runEpoch(interactions, len(self.environment.imageList))
         self.task.flushStats()
         self.doValidation(epoch)
         s = cu.toc('Epoch done in ', s)
         epoch += 1
     self.learner = QLearning()
     self.agent.learner = self.learner
     egEpochs = config.geti('epsilonGreedyEpochs')
     while epoch <= egEpochs + exEpochs:
         s = cu.tic()
         epsilon = epsilon - (1.0 - minEpsilon) / float(egEpochs)
         if epsilon < minEpsilon: epsilon = minEpsilon
         self.controller.setEpsilonGreedy(epsilon,
                                          self.environment.sampleAction)
         print 'Epoch', epoch, '(epsilon-greedy:{:5.3f})'.format(epsilon)
         self.runEpoch(interactions, epochSize)
         self.task.flushStats()
         self.doValidation(epoch)
         s = cu.toc('Epoch done in ', s)
         epoch += 1
     maxEpochs = config.geti('exploitLearningEpochs') + exEpochs + egEpochs
     while epoch <= maxEpochs:
         s = cu.tic()
         print 'Epoch', epoch, '(exploitation mode: epsilon={:5.3f})'.format(
             epsilon)
         self.runEpoch(interactions, epochSize)
         self.task.flushStats()
         self.doValidation(epoch)
         s = cu.toc('Epoch done in ', s)
         shutil.copy(networkFile, networkFile + '.' + str(epoch))
         epoch += 1
Example #19
 def load_data(self, file_path, data_loading_fn, *args, **kwargs):
     # print(os.path.join(data_path,filename))
     local_file_path = os.path.expanduser(file_path) 
     self.file_path = local_file_path
     # print(os.path.isfile(os.path.join(local_file_path,filename)))
     print('loading file: %s' %file_path)
     if not os.path.isfile(local_file_path) and self.main_hostname != 'localhost':
         print('Copying data from remote machine...')
         tic()
         ssh.copy_file_from_remote(file_path, self.main_hostname, self.username)
         print('Completed copying data from main host.')
         toc()
     print('Loading data...')
     tic()
     data = data_loading_fn(local_file_path, *args, **kwargs)
     toc()
     return data
Example #20
def fill_gen_model_matrix(phi):
    d, nsamples = phi.shape
    x = p2torus(phi)
    q = p2dtorus(phi)
    x.shape = (d,2,nsamples)
    q.shape = (d,2,nsamples)

    nij = 4*d**2 # number of coupling terms
    na = 32*d**3 - 16*d**2 # number of elements in large matrix multiplying mij
    adata = np.zeros(na,float)
    arow = np.zeros(na,int)
    acol = np.zeros(na,int)
    b = np.zeros(nij,float)

    tic('weave')
    weave.inline(gen_phasemodel_code, ['x','q','adata','arow','acol','b'])
    toc('weave')
    return adata, arow, acol, b
Example #21
 def run(self, image, features, boxes):
   s = cu.tic()
   result = {}
   boxSet = [ map(float, b[1:]) for b in boxes ]
   for i in self.catIndex:
     scores = features[:,i]
     fb,fs = det.nonMaximumSuppression(boxSet, scores, self.maxOverlap)
     result[i] = (image, fb, fs)
   s = cu.toc(image, s)
   return result
Example #22
 def runEpoch(self, interactions, maxImgs):
   img = 0
   s = cu.tic()
   while img < maxImgs:
     self.experiment.doInteractions(interactions)
     self.agent.learn()
     self.agent.reset()
     self.environment.loadNextEpisode()
     img += 1
   s = cu.toc('Run epoch with ' + str(maxImgs) + ' episodes', s)
Example #23
 def runEpoch(self, interactions, maxImgs):
     img = 0
     s = cu.tic()
     while img < maxImgs:
         self.experiment.doInteractions(interactions)
         self.agent.learn()
         self.agent.reset()
         self.environment.loadNextEpisode()
         img += 1
     s = cu.toc('Run epoch with ' + str(maxImgs) + ' episodes', s)
Example #24
 def run(self, image, features, boxes):
     s = cu.tic()
     result = {}
     boxSet = [map(float, b[1:]) for b in boxes]
     for i in self.catIndex:
         scores = features[:, i]
         fb, fs = det.nonMaximumSuppression(boxSet, scores, self.maxOverlap)
         result[i] = (image, fb, fs)
     s = cu.toc(image, s)
     return result
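run() delegates the per-class box filtering to det.nonMaximumSuppression. A minimal greedy NMS sketch in the same spirit (a hypothetical helper, not the project's implementation):

import numpy as np

def nms(boxes, scores, max_overlap):
    """boxes: (N, 4) rows of [x1, y1, x2, y2]; returns indices to keep."""
    boxes = np.asarray(boxes, dtype=float)
    order = np.argsort(scores)[::-1]          # highest score first
    keep = []
    while order.size:
        i = order[0]
        keep.append(i)
        rest = order[1:]
        x1 = np.maximum(boxes[i, 0], boxes[rest, 0])
        y1 = np.maximum(boxes[i, 1], boxes[rest, 1])
        x2 = np.minimum(boxes[i, 2], boxes[rest, 2])
        y2 = np.minimum(boxes[i, 3], boxes[rest, 3])
        inter = np.maximum(0.0, x2 - x1) * np.maximum(0.0, y2 - y1)
        area_i = (boxes[i, 2] - boxes[i, 0]) * (boxes[i, 3] - boxes[i, 1])
        areas = (boxes[rest, 2] - boxes[rest, 0]) * (boxes[rest, 3] - boxes[rest, 1])
        iou = inter / (area_i + areas - inter + 1e-12)
        order = rest[iou <= max_overlap]      # drop boxes overlapping the kept one
    return keep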
Example #25
def impl(V, L1, R1x, L2, R2x, dt, n, crumbs=[], callback=None):
    V = V.copy()

    L1i = flatten_tensor(L1)
    R1 = np.array(R1x).T

    L2i = flatten_tensor(L2)
    R2 = np.array(R2x)

    m = 2

    # L  = (As + Ass - r*np.eye(nspots))*-dt + np.eye(nspots)
    L1i.data *= -dt
    L1i.data[m, :] += 1
    R1 *= dt

    L2i.data *= -dt
    L2i.data[m, :] += 1
    R2 *= dt

    offsets1 = (abs(min(L1i.offsets)), abs(max(L1i.offsets)))
    offsets2 = (abs(min(L2i.offsets)), abs(max(L2i.offsets)))

    print_step = max(1, int(n / 10))
    to_percent = 100.0 / n
    utils.tic("Impl:")
    for k in xrange(n):
        if not k % print_step:
            if isnan(V).any():
                print "Impl fail @ t = %f (%i steps)" % (dt * k, k)
                return crumbs
            print int(k * to_percent),
        if callback is not None:
            callback(V, ((n - k) * dt))
        V = spl.solve_banded(offsets2,
                             L2i.data, (V + R2).flat,
                             overwrite_b=True).reshape(V.shape)
        V = spl.solve_banded(offsets1,
                             L1i.data, (V + R1).T.flat,
                             overwrite_b=True).reshape(V.shape[::-1]).T
    crumbs.append(V.copy())
    utils.toc()
    return crumbs
Example #26
 def train(self):
   networkFile = config.get('networkDir') + config.get('snapshotPrefix') + '_iter_' + config.get('trainingIterationsPerBatch') + '.caffemodel'
   interactions = config.geti('trainInteractions')
   minEpsilon = config.getf('minTrainingEpsilon')
   epochSize = len(self.environment.imageList)/1
   epsilon = 1.0
   self.controller.setEpsilonGreedy(epsilon, self.environment.sampleAction)
   epoch = 1
   exEpochs = config.geti('explorationEpochs')
   while epoch <= exEpochs:
     s = cu.tic()
     print 'Epoch',epoch,': Exploration (epsilon=1.0)'
     self.runEpoch(interactions, len(self.environment.imageList))
     self.task.flushStats()
     self.doValidation(epoch)
     s = cu.toc('Epoch done in ',s)
     epoch += 1
   self.learner = QLearning()
   self.agent.learner = self.learner
   egEpochs = config.geti('epsilonGreedyEpochs')
   while epoch <= egEpochs + exEpochs:
     s = cu.tic()
     epsilon = epsilon - (1.0-minEpsilon)/float(egEpochs)
     if epsilon < minEpsilon: epsilon = minEpsilon
     self.controller.setEpsilonGreedy(epsilon, self.environment.sampleAction)
     print 'Epoch',epoch ,'(epsilon-greedy:{:5.3f})'.format(epsilon)
     self.runEpoch(interactions, epochSize)
     self.task.flushStats()
     self.doValidation(epoch)
     s = cu.toc('Epoch done in ',s)
     epoch += 1
   maxEpochs = config.geti('exploitLearningEpochs') + exEpochs + egEpochs
   while epoch <= maxEpochs:
     s = cu.tic()
     print 'Epoch',epoch,'(exploitation mode: epsilon={:5.3f})'.format(epsilon)
     self.runEpoch(interactions, epochSize)
     self.task.flushStats()
     self.doValidation(epoch)
     s = cu.toc('Epoch done in ',s)
     shutil.copy(networkFile, networkFile + '.' + str(epoch))
     epoch += 1
Example #27
def impl(V, L1, R1x, L2, R2x, dt, n, crumbs=[], callback=None):
    V = V.copy()

    L1i = flatten_tensor(L1)
    R1 = np.array(R1x).T

    L2i = flatten_tensor(L2)
    R2 = np.array(R2x)

    m = 2

    # L  = (As + Ass - r*np.eye(nspots))*-dt + np.eye(nspots)
    L1i.data *= -dt
    L1i.data[m, :] += 1
    R1 *= dt

    L2i.data *= -dt
    L2i.data[m, :] += 1
    R2 *= dt

    offsets1 = (abs(min(L1i.offsets)), abs(max(L1i.offsets)))
    offsets2 = (abs(min(L2i.offsets)), abs(max(L2i.offsets)))

    print_step = max(1, int(n / 10))
    to_percent = 100.0 / n
    utils.tic("Impl:")
    for k in xrange(n):
        if not k % print_step:
            if isnan(V).any():
                print "Impl fail @ t = %f (%i steps)" % (dt * k, k)
                return crumbs
            print int(k * to_percent),
        if callback is not None:
            callback(V, ((n - k) * dt))
        V = spl.solve_banded(offsets2, L2i.data,
                             (V + R2).flat, overwrite_b=True).reshape(V.shape)
        V = spl.solve_banded(offsets1, L1i.data,
                             (V + R1).T.flat, overwrite_b=True).reshape(V.shape[::-1]).T
    crumbs.append(V.copy())
    utils.toc()
    return crumbs
Example #28
 def runEpoch(self, interactions, maxImgs):
   img = 0
   s = cu.tic()
   while img < maxImgs:
     k = 0
     while not self.environment.episodeDone and k < interactions:
       self.experiment._oneInteraction()
       k += 1
     self.agent.learn()
     self.agent.reset()
     self.environment.loadNextEpisode()
     img += 1
   s = cu.toc('Run epoch with ' + str(maxImgs) + ' episodes', s)
Example #29
 def runEpoch(self, interactions, maxImgs):
     img = 0
     s = cu.tic()
     while img < maxImgs:
         k = 0
         while not self.environment.episodeDone and k < interactions:
             self.experiment._oneInteraction()
             k += 1
         self.agent.learn()
         self.agent.reset()
         self.environment.loadNextEpisode()
         img += 1
     s = cu.toc('Run epoch with ' + str(maxImgs) + ' episodes', s)
Example #30
 def do_next_group(self, tiffs_to_run):
     """
     Replaces the OnlineAnalysis.do_next_group() so we can fake experiments and test downstream
     analysis.
     """
     t = tic()
     self.validate_tiffs()
     self.opts.change_params(dict(fnames=tiffs_to_run))
     self.make_mmap(tiffs_to_run)
     self.make_movie()
     self.C = self.do_fit()
     self.trial_lengths.append(self.splits)
     self.group_lenths.append(toc(t))
     #self.save_json()
     self.advance(by=1)
Example #31
def timing_benchmark(eval_dim=None,dims=[2, 4, 6, 8, 10],nsamps=10**4):
    ind = 0
    t = np.zeros(len(dims),float)
    for d in dims:
        print 'Benchmarking dim: ', d
        phi = 2*np.pi*np.random.rand(d,nsamps)

        tic('fit parameters')
        c_inv = fit_model(phi)
        t[ind] = toc('fit parameters')
        ind += 1
    pol = np.polyfit(dims[1:],t[1:],3)
    if eval_dim:
        print np.polyval(pol,eval_dim)
    else:
        return pol
Example #32
def timing_benchmark(eval_dim=None, dims=[2, 4, 6, 8, 10], nsamps=10**4):
    ind = 0
    t = np.zeros(len(dims), float)
    for d in dims:
        print 'Benchmarking dim: ', d
        phi = 2 * np.pi * np.random.rand(d, nsamps)

        tic('fit parameters')
        c_inv = fit_model(phi)
        t[ind] = toc('fit parameters')
        ind += 1
    pol = np.polyfit(dims[1:], t[1:], 3)
    if eval_dim:
        print np.polyval(pol, eval_dim)
    else:
        return pol
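timing_benchmark fits a cubic polynomial to the measured runtimes (dropping the first point, presumably as warm-up) and evaluates it to extrapolate to a larger dimension. The same idiom standalone, with made-up timings:

import numpy as np

dims = np.array([2, 4, 6, 8, 10])
t = np.array([0.01, 0.05, 0.20, 0.60, 1.40])   # illustrative seconds
pol = np.polyfit(dims[1:], t[1:], 3)           # cubic fit, first point skipped
print(np.polyval(pol, 12))                     # predicted runtime at dim 12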
Example #33
def computeFeatures(batch, n, data, net, layers, output):
  startTime = tic()
  allFeat = {}
  for l in layers.keys():
    allFeat[l] = emptyMatrix([n,layers[l]['dim']])
  # Extract and store CNN Features
  outputBlobs = [np.empty((batch, 1000, 1, 1), dtype=np.float32)]
  for i in range(batch,n+batch,batch):
    inputBlobs = np.empty((batch, 3, 227, 227), dtype=np.float32)
    start = i-batch
    finish = min(i,n)
    elems = finish-start
    inputBlobs[0:elems,:,:,:] = data[start:finish,:,:,:]
    net.caffenet.Forward([inputBlobs], outputBlobs)
    outputs =  net.caffenet.blobs()
    for l in layers.keys():
      allFeat[l][start:finish,:] = outputs[layers[l]['idx']].data[0:elems,:,:,:].reshape([elems,layers[l]['dim']])
  # Save files for this image
  for l in layers.keys():
    saveMatrix(allFeat[l][0:n,:],output+'.'+l)
  lap = toc('Image ready with '+str(n)+' boxes in:',startTime)
Example #34
def computeFeatures(batch, n, data, net, layers, output):
    startTime = tic()
    allFeat = {}
    for l in layers.keys():
        allFeat[l] = emptyMatrix([n, layers[l]['dim']])
    # Extract and store CNN Features
    outputBlobs = [np.empty((batch, 1000, 1, 1), dtype=np.float32)]
    for i in range(batch, n + batch, batch):
        inputBlobs = np.empty((batch, 3, 227, 227), dtype=np.float32)
        start = i - batch
        finish = min(i, n)
        elems = finish - start
        inputBlobs[0:elems, :, :, :] = data[start:finish, :, :, :]
        net.caffenet.Forward([inputBlobs], outputBlobs)
        outputs = net.caffenet.blobs()
        for l in layers.keys():
            allFeat[l][start:finish, :] = outputs[layers[l]['idx']].data[
                0:elems, :, :, :].reshape([elems, layers[l]['dim']])
    # Save files for this image
    for l in layers.keys():
        saveMatrix(allFeat[l][0:n, :], output + '.' + l)
    lap = toc('Image ready with ' + str(n) + ' boxes in:', startTime)
Example #35
import utils as cu
import libDetection as det
from dataProcessor import processData

class Checker():
  def __init__(self):
   print 'Starting checker' 

  def run(self,img,features,bboxes):
    return img,features.shape[0] == len(bboxes)

## Main Program Parameters
params = cu.loadParams("testImageList featuresDir featuresExt")

imageList = [x.replace('\n','') for x in open(params['testImageList'])]
## Run Detector
task = Checker()
start = cu.tic()
result = processData(imageList,params['featuresDir'],params['featuresExt'],task)
cu.toc('All images checked',start)
totalP = 0
for data in result:
  img,r = data
  if not r:
    print 'Problems with',img
    totalP += 1
print 'Total problems:',totalP
Example #36
def main(argv):
    global STATES
    year, state, ext, shift = (None, None, None, None)
    usage = '05-cand-info-vices.py -y 2016 -s SC -t 1 or 2 -e data.csv'

    try:
        opts, _args = getopt.getopt(
            argv, 'hy:s:t:e:', ['year=', 'state=', 'shift=', 'ext='])
    except getopt.GetoptError:
        print(usage)
        sys.exit()
    for opt, arg in opts:
        if opt == '-h':
            print(usage)
            sys.exit()
        elif opt in ('-y', '--year'):
            year = arg
        elif opt in ('-s', '--state'):
            state = arg
        elif opt in ('-t', '--shift'):
            shift = arg
        elif opt in ('-e', '--ext'):
            ext = arg

    if year == '2020' or year == '2016' or year == '2012':
        STATES.remove('DF')
    else:
        print('Year is invalid!')
        print(usage)
        sys.exit()

    if state:
        STATES = list(filter(lambda x: x == str(state), STATES))

    if not shift:
        print('The shift number is required! 1 or 2 shift.')
        sys.exit()

    engine = create_engine(DATABASE, echo=False)

    tic()

    for st in STATES:
        print(
            'Reading candidates (%s) for vice-mayor of all cities in the state of: %s in shift: %s' %
            (year, st, shift))

        df = pd.read_sql("""SELECT nm_city FROM cand_info
            WHERE election_year = '{}' AND sg_uf = '{}' GROUP BY 1 ORDER BY 1""".format(year, st), engine)

        # df = pd.read_sql("""SELECT nm_city FROM cand_info
        # WHERE election_year = '{}' AND sg_uf = '{}' AND nm_city = 'OSASCO'
        # GROUP BY 1 ORDER BY 1""".format(year, st), engine)

        dfcount = df['nm_city'].count()
        bar = Bar('Progress', max=dfcount)

        # for ct in ['RIO BRANCO']:
        for ct in df['nm_city'].tolist():
            if year == '2020' or year == '2016' or year == '2012':
                df0 = pd.read_sql("""
                SELECT
                    t2.election_year,
                    t2.sg_uf,
                    t1.sq_candidate,
                    t1.nr_cpf_candidate,
                    t1.nm_candidate,
                    t1.sg_party,
                    t1.nr_party,
                    t1.nm_ballot_candidate,
                    t1.ds_position,
                    t1.ds_situ_tot_shift,
                    t2.nm_city,
                    t1.ds_situ_cand,
                    t1.nm_email,
                    t1.ds_genre,
                    t1.ds_degree_instruction,
                    t1.ds_race_color,
                    t1.ds_occupation,
                    t1.nr_campaign_max_expenditure,
                    t1.st_reelection,
                    t1.dt_birth,
                    t1.nr_shift,
                    t1.ds_election,
                    t1.sq_alliance
                FROM raw_tse_consult_candidates t1
                    INNER JOIN cand_info AS t2 ON (t1.sq_alliance = t2.sq_alliance)
                    WHERE t1.election_year = '{}' AND t1.sg_uf = '{}'
                    AND t1.cd_position = 12 AND t2.nm_city = "{}" AND t1.nr_shift = '{}'
                    GROUP BY 1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23""".format(year, st, ct, shift), engine)

                df0 = df0.applymap(
                    lambda s: s.upper() if isinstance(
                        s, str) else s)

                df1 = pd.read_sql("""SELECT * FROM cand_info
                    WHERE sg_uf = '{}' AND nm_city = "{}"
                        AND ds_position = 'PREFEITO' ORDER BY qt_votes_nominal_int DESC""".format(st, ct), engine)

                df2 = pd.merge(df1, df0, on='sq_alliance', how='inner')
                df3 = df2[COLS_VICES_XY]

                df4 = df3.rename(columns=COLS_VICES_XY_NEW, inplace=False)
                df4 = df4.applymap(
                    lambda s: s.upper() if isinstance(
                        s, str) else s)

                if int(shift) == 1:
                    if any(df4['ds_situ_tot_shift'] == '2º TURNO'):
                        df4 = df4.where(df4['ds_situ_tot_shift'] != '2º TURNO')
                    if any(df4['ds_situ_tot_shift'] == '#NULO#'):
                        df4.loc[df4['ds_situ_tot_shift'] == '#NULO#',
                                ['ds_situ_tot_shift']] = 'NÃO ELEITO'

                elif int(shift) == 2:
                    if not df4.empty:
                        df4.sort_values(
                            by=['ds_situ_tot_shift'],
                            inplace=False,
                            ascending=False)
                        rank = df4['qt_votes_nominal_int'].nlargest(4).tolist()

                        for i in range(len(rank)):
                            if i == 0:
                                df4.loc[df4['qt_votes_nominal_int'] == rank[0], [
                                    'ds_situ_tot_shift']] = 'ELEITO'
                            if i == 1:
                                df4.loc[df4['qt_votes_nominal_int'] == rank[1], [
                                    'ds_situ_tot_shift']] = '2º TURNO'
                            if i == 2:
                                df4.loc[df4['qt_votes_nominal_int'] == rank[2], [
                                    'ds_situ_tot_shift']] = 'NÃO ELEITO'
                            if i == 3:
                                df4.loc[df4['qt_votes_nominal_int'] == rank[3], [
                                    'ds_situ_tot_shift']] = '2º TURNO'
                    else:
                        pass

                df5 = pd.read_sql("""SELECT * FROM cand_info
                    WHERE sg_uf = '{}' AND nm_city = "{}"
                        AND ds_position = 'VICE-PREFEITO'""".format(st, ct), engine)

                for i in df4['sq_candidate'].tolist():
                    if any(df5['sq_candidate'] == i):
                        df4 = df4[df4['sq_candidate'] != i]

                df6 = pd.read_sql("""
                SELECT
                    sq_candidate,
                    format(sum(amount_goods_declared), 0, 'de_DE') as amount_goods_declared,
                    sum(amount_goods_declared) as amount_goods_declared_float
                FROM raw_tse_cand_goods_declared
                    WHERE election_year = '{}' AND sg_uf = '{}' GROUP BY 1
                    ORDER BY 3 DESC""".format(year, st), engine)

                df7 = pd.merge(df4, df6, on='sq_candidate', how='inner')

                if df7.empty:
                    df4['amount_goods_declared'] = ''
                    df4['amount_goods_declared_float'] = 0
                    df7 = df4

                if not df7.empty:
                    final = df7.sort_values(
                        by=['qt_votes_nominal'],
                        inplace=False,
                        ascending=False)

                    if ext:
                        write_to_csv(final)

                    final.to_sql(
                        con=engine,
                        name=CAND_TABLE_NAME,
                        if_exists='append',
                        index=False,
                        index_label=CAND_TABLE_NAME_ID)
            else:
                raise ValueError('Invalid year')
            bar.next()
        bar.finish()

    toc()
Example #37
def saveMatrix(matrix,outFile):
  outf = open(outFile,'w')
  np.savez_compressed(outf,matrix)
  outf.close()

#################################
# Extract Features
#################################
startTime = tic()
totalItems = len(images)
layers = {'fc6_neuron_cudanet_out': {'dim':4096,'idx':'fc6'}, 'fc7_neuron_cudanet_out': {'dim':4096,'idx':'fc7'}}
batch = 50

print 'Extracting features for',totalItems,'total images'
for name in images:
  # Check if files already exist
  processed = 0
  for l in layers.keys():
    if os.path.isfile(outDir+'/'+name+'.'+l):
      processed += 1
  if processed == len(layers):
    continue
  # Get features for patches
  indexFile = open(outDir+'/'+name+'.idx','w')
  processImg(name, imgsDir+'/'+name+'.jpg', indexFile, batch, layers, outDir+'/'+name)
  indexFile.close()

toc('Total processing time:',startTime)

Example #38
plt.title("Rejection Method (Constant Function)")
plt.legend(["rho(x)", "f(x)=3/10", "Histogram data"])
plt.show()

plt.figure(4)
plt.hist(y_inverse2, bins, normed=1)
plt.plot(x, const_function)
plt.show()

area_cauchy_exact = 3 * 2 * math.atan(xmax) / pi
area_rho_exact = 0.921348
area_constant_exact = 0.3 * 20

# Acceptance Rates
print("Acceptance rate cauchy exact: {:.2f}%".format(area_rho_exact /
                                                     area_cauchy_exact * 100))
print("Acceptance rate rejection method (cauchy): {:.2f}%\n".format(
    acceptance_rate))

print("Acceptance rate constant function exact: {:.2f}%".format(
    area_rho_exact / area_constant_exact * 100))
print("Acceptance rate rejection method (constant): {:.2f}%\n".format(
    acceptance_rate2))

# Save plots to pdf file
utils.save_fig_to_pdf("ComSim1.pdf", fig_functions, fig_inverse, fig_rejection,
                      fig_rejection2)

# Measure total program runtime
utils.toc(start_time)
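The acceptance rates above compare exact area ratios against the empirical rates from rejection sampling: draw x uniformly over the envelope's support, then accept it when a uniform draw under the envelope falls below rho(x). A minimal sketch with a constant envelope (hypothetical target density, not this script's rho):

import numpy as np

def rejection_sample(rho, env_height, xmin, xmax, n):
    xs = np.random.uniform(xmin, xmax, n)
    us = np.random.uniform(0.0, env_height, n)
    accepted = xs[us < rho(xs)]                 # keep draws under the density
    return accepted, accepted.size / float(n)   # samples and acceptance rate

samples, rate = rejection_sample(lambda x: np.exp(-x * x), 1.0, -5.0, 5.0, 10**4)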
Example #39
    print('\n\n************ Computing Distance Matrix ************\n\n')

    for i in range(total_shapes):
        f = open(save_path + '/' + PDs[i][0], 'rb')
        pts1 = pickle.load(f)
        f.close()
        for j in range(i, total_shapes):
            f = open(save_path + '/' + PDs[j][0], 'rb')
            pts2 = pickle.load(f)
            f.close()
            for k in range(len(pts1)):
                utils.tic()
                distmat1[i, j] = distmat1[i, j] + utils.subspace_angle(
                    pts1[k], pts2[k])
                time_taken_SubspaceAngle.append(utils.toc())
                utils.tic()
                distmat2[i, j] = distmat2[i, j] + utils.distChordalGrass(
                    pts1[k], pts2[k])
                time_taken_Chordal.append(utils.toc())
        if not (i + 1) % 8:
            print('#', end=" ")
    distmat = distmat1 + distmat1.T
    f = open(code_path + '/distmat_SubspaceAngle_' + descriptor + '.pckl',
             'wb')
    pickle.dump(distmat, f)
    f.close()

    time_taken_SubspaceAngle = time_taken_SubspaceAngle[0:3000]
    f = open(code_path + '/time_taken_SubspaceAngle_' + descriptor + '.pckl',
             'wb')
Example #40
    dim = 3

    M = np.random.randn(2*dim,2*dim)
    M += M.T.copy()
    for i in np.arange(M.shape[0]/2):
        s = M[2*i,2*i] + M[2*i+1,2*i+1]
        M[2*i,2*i]     -= s/2
        M[2*i+1,2*i+1] -= s/2

    tic('sampling')
    nsamples = 10**4
    burnin = 10**3
    lf_steps = 50
    step_sz = .15
    phi,E,diagn = sample_hmc(M,nsamples,burnin,lf_steps,step_sz,diagnostics=True)
    toc('sampling')

    tic('fitting')
    M_hat = fit_model(phi)
    Mneg_hat,Mpos_hat= m2kappa(M_hat)
    # anti-symmetrize diagonal elements for estimation matrix
    for i in np.arange(M_hat.shape[0]/2):
        s = M_hat[2*i,2*i] + M_hat[2*i+1,2*i+1]
        M_hat[2*i,2*i]     -= s/2
        M_hat[2*i+1,2*i+1] -= s/2
    toc('fitting')

    M_error = M - M_hat
    M_max = max(abs(M).max(),abs(M_hat).max())
    print 'M_error norm = ', (M_error**2).sum()
Example #41
  scipy.io.savemat(output, mat, do_compression=True)

##################################
# Organize boxes by source image
#################################
startTime = tic()

images = {}
for s,box in bboxes:
  # Subtract 1 because RCNN proposals have 1-based indexes for Matlab
  b = map(lambda x: int(x)-1,box[1:]) + [s.replace('\n','')]
  try:
    images[ box[0] ].append(b)
  except:
    images[ box[0] ] = [b]
lap = toc('Reading boxes file:',startTime)

groundTruth, categories = loadBoxAnnotationsFile(groundTruthFile)
print 'Found categories:',categories
lap = toc('Reading ground truth file:',lap)

#################################
# Extract Features
#################################
totalItems = len(bboxes)
del(bboxes)
layers = {'pool5': {'dim':9216,'idx':'pool5'}}

print 'Extracting features for',totalItems,'total regions'
for name in images.keys():
  # Get window proposals
Example #42
  def __init__(self, boxes):
    t = cu.tic()
    self.auxBoxes = []
    frame = [999,999,0,0]
    id = 0
    for box in boxes.tolist():
      frame = min(frame[0:2], box[0:2]) + max(frame[2:],box[2:])
      self.auxBoxes.append(Box(box, id))
      id += 1
    self.frame = map(int,frame)
    self.auxBoxes.sort(key=lambda x:x.area, reverse=True)
    self.adjacency = np.zeros( (len(self.auxBoxes),len(self.auxBoxes)) )
    for i in range(len(self.auxBoxes)):
      self.adjacency[i,i] = 1.0
      for j in range(i+1,len(self.auxBoxes)):
        iou = self.auxBoxes[i].IoU( self.auxBoxes[j] )
        self.adjacency[i,j] = iou
        self.adjacency[j,i] = iou
    
    knn = range(-2,-GRAPH_NEIGHBORS-2,-1) # Avoid last element (same box)
    self.graph = {'nodes':[], 'edges':[]}
    for i in range(len(self.auxBoxes)):
      center = self.auxBoxes[i].center()
      node = {'data':{'id':str(i), 'box':self.auxBoxes[i].box}, 
               'position':{'x':int(center[0]),'y':int(center[1])}}
      self.graph['nodes'].append(node)
      edges = {}
      neighbors = np.argsort( self.adjacency[i,:] )
      knn =  [ (j,self.adjacency[i,j]) for j in neighbors[-NEAREST_NEIGHBORS-2:-2] ]
      for j,iou in knn:
        edge = { 'data': { 'id': str(i)+'_'+str(j), 'weight': iou, 'source': str(i), 'target': str(j) } }
        self.graph['edges'].append(edge)
    
    t = cu.toc('Graph construction:',t)
    return 

    self.layout = []
    scaleRange = len(auxBoxes)/SCALES
    for s in range(SCALES):
      scaleElems = []
      self.layout.append([])
      for i in range(scaleRange):
        scaleElems.append(auxBoxes[scaleRange*s + i])
      scaleElems.sort(key=lambda x:x.box[0], reverse=True)
      horizontalRange = len(scaleElems)/HORIZONTAL_BINS
      for h in range(HORIZONTAL_BINS):
        horizontalRangeElems = []
        self.layout[s].append([])
        for j in range(horizontalRange):
          horizontalRangeElems.append(scaleElems[horizontalRange*h + j])
        horizontalRangeElems.sort(key=lambda x:x.box[1], reverse=True)
        verticalRange = len(horizontalRangeElems)/VERTICAL_BINS
        for v in range(VERTICAL_BINS):
          self.layout[s][h].append([])
          for k in range(verticalRange):
            self.layout[s][h][v].append(horizontalRangeElems[verticalRange*v + k])
    self.numBoxes = len(auxBoxes)
    self.boxesPerBin = float(self.numBoxes)/WORLD_SIZE
    self.actionCounter = 0
    self.scale = SCALES/2 # (Greedy: Set to zero)
    self.horizontal = 0
    self.vertical = 0
    self.percentExplored = 0
    self.selectedIds = []
    self.status = np.zeros( (SCALES, HORIZONTAL_BINS, VERTICAL_BINS), dtype=np.int32 )
    self.currentPosition = np.zeros( (SCALES, HORIZONTAL_BINS, VERTICAL_BINS), dtype=np.int32 )
Example #43
            fetched = sess.run(fetches, feed_dict)

            loss, p, gnorm = fetch(fetched, ('loss', 'p', 'gnorm'))
            tensors, tensor_shapes = fetch(fetched,
                                           ('tensors', 'tensor_shapes'))

            #####################################

            losses.append(loss)
            gnorms.append(gnorm)
            TRAIN_PRED.extend(np.squeeze(p))
            TRAIN_Y.extend(np.squeeze(b.y))

            word_count = batcher.word_count(reset=False)
            sec = U.toc(reset=False)
            wps = int(word_count / sec)

            if batch % FLAGS.print_every == 0:
                if ACC:
                    acc = U.nacc(TRAIN_Y[-k:], TRAIN_PRED[-k:])
                    sys.stdout.write('\tacc={0:0.3f}'.format(acc))
                else:
                    kappa = U.nkappa(TRAIN_Y[-k:], TRAIN_PRED[-k:])
                    sys.stdout.write('\tqwk={0:0.3f}'.format(kappa))
                sys.stdout.write('|loss={0:0.3f}'.format(loss))
                #sys.stdout.write('|ploss={0:0.3g}'.format(p_loss))
                sys.stdout.write('|wps={0}'.format(wps))
                sys.stdout.write('|bs={0}'.format(b.n))
                #sys.stdout.write('|gnm={0:0.2f}'.format(gnorm))
                sys.stdout.flush()
Example #44
def crank(V, L1, R1x, L2, R2x, dt, n, crumbs=[], callback=None):
    V = V.copy()
    theta = 0.5
    # dt *= 0.5

    # L1e = flatten_tensor(L1)
    L1e = L1.copy()
    L1i = L1e.copy()
    R1 = np.array(R1x).T

    # L2e = flatten_tensor(L2)
    L2e = L2.copy()
    L2i = L2e.copy()
    R2 = np.array(R2x)

    # print "L var"
    # fp(L2e.data, 2)
    # print "FD op var"
    # fp(F.operators[1].data, 2)

    # print "diff"
    # fp(F.operators[1].data - L2e.data, 2, 'f')

    # assert np.allclose(F.operators[1].data, L2e.data)

    m = 2

    # L  = (As + Ass - H.interest_rate*np.eye(nspots))*-dt + np.eye(nspots)

    L1i.data *= -theta*dt
    L1i.data[m, :] += 1
    # R1 *= dt

    L2i.data *= -theta*dt
    L2i.data[m, :] += 1
    # R2 *= dt

    offsets1 = (abs(min(L1i.offsets)), abs(max(L1i.offsets)))
    offsets2 = (abs(min(L2i.offsets)), abs(max(L2i.offsets)))

    dx = np.gradient(spots)[:,np.newaxis]
    dy = np.gradient(vars)
    X, Y = [dim.T for dim in np.meshgrid(spots, vars)]
    gradgrid = dt * coeffs[(0,1)](0, X, Y) / (dx*dy)
    gradgrid[:,0] = 0; gradgrid[:,-1] = 0
    gradgrid[0,:] = 0; gradgrid[-1,:] = 0

    print_step = max(1, int(n / 10))
    to_percent = 100.0 / n
    utils.tic("Crank:")
    R = R1 + R2
    normal_shape = V.shape
    transposed_shape = normal_shape[::-1]
    for k in xrange(n):
        if not k % print_step:
            if np.isnan(V).any():
                print "Crank fail @ t = %f (%i steps)" % (dt * k, k)
                return crumbs
            print int(k * to_percent),
        if callback is not None:
            callback(V, ((n - k) * dt))

        Vsv = np.gradient(np.gradient(V)[0])[1] * gradgrid

        # V12 = (V
                # + Vsv
                # + (1-theta)*dt*L1e.dot(V.T.flat).reshape(transposed_shape).T
                # + (1-theta)*dt*L2e.dot(V.flat).reshape(normal_shape)
                # + dt * R)

        # V1 = spl.solve_banded(offsets2, L2i.data, V12.flat, overwrite_b=True).reshape(normal_shape)
        # V  = spl.solve_banded(offsets1, L1i.data, V1.T.flat, overwrite_b=True).reshape(transposed_shape).T

        V1 = (L1e.dot(V.T.flat).reshape(transposed_shape)).T
        V2 = (L2e.dot(V.flat).reshape(normal_shape))
        Y0 = V + Vsv + dt*(V1 + V2 + R)

        V1 = Y0 - theta * dt * L1e.dot(V.T.flat).reshape(transposed_shape).T
        Y1 = spl.solve_banded(offsets1, L1i.data, V1.T.flat, overwrite_b=True).reshape(transposed_shape).T

        V2 = Y1 - theta * dt * L2e.dot(V.flat).reshape(normal_shape)
        Y2 = spl.solve_banded(offsets2, L2i.data, V2.flat, overwrite_b=True).reshape(normal_shape)
        V = Y2


        crumbs.append(V.copy())
    utils.toc()
    return crumbs
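Note on the loop above: the Y0/Y1/Y2 steps implement a Douglas-style ADI splitting. Y0 is an explicit full-step predictor, after which each dimension receives an implicit theta-correction via solve_banded, while the mixed spot/variance derivative (Vsv) stays explicit throughout. The commented-out V12 block is an alternative sequential-implicit variant of the same step.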
Example #45
  inQueue.task_done()
  return True

##################################
# Organize boxes by source image
#################################
startTime = tic()

images = {}
for s,box in bboxes:
  b = map(int,box[1:]) + [s]
  try:
    images[ box[0] ].append(b)
  except KeyError:
    images[ box[0] ] = [b]
lap = toc('Reading boxes file:',startTime)
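The try/except insertion above is a common grouping idiom; collections.defaultdict expresses the same grouping without any exception handling. A sketch of that step:

from collections import defaultdict

images = defaultdict(list)
for s, box in bboxes:
    images[box[0]].append([int(v) for v in box[1:]] + [s])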

#################################
# Extract Features
#################################
totalItems = len(bboxes)
del bboxes
layers = {'fc6_neuron_cudanet_out': {'dim':4096,'idx':15} }
batch = 200
taskQueue = Queue.Queue()
p = threading.Thread(target=worker, args=(taskQueue, net, layers, batch))
p.daemon = True
p.start()
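# A single daemon thread drains taskQueue: worker is assumed to loop on
# queue.get(), run each batch through the net, and call task_done() when a
# batch finishes (the inQueue.task_done() fragment above is the tail of
# exactly such a worker); daemon=True lets the interpreter exit even if the
# queue never fully drains.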

print 'Extracting features for',totalItems,'total images'
for name in images.keys():
Example No. 46
0
        s = M[2 * i, 2 * i] + M[2 * i + 1, 2 * i + 1]
        M[2 * i, 2 * i] -= s / 2
        M[2 * i + 1, 2 * i + 1] -= s / 2

    tic('sampling')
    nsamples = 10**4
    burnin = 10**3
    lf_steps = 50
    step_sz = .15
    phi, E, diagn = sample_hmc(M,
                               nsamples,
                               burnin,
                               lf_steps,
                               step_sz,
                               diagnostics=True)
    toc('sampling')

    tic('fitting')
    M_hat = fit_model(phi)
    Mneg_hat, Mpos_hat = m2kappa(M_hat)
    # anti-symmetrize diagonal elements for estimation matrix
    for i in np.arange(M_hat.shape[0] / 2):
        s = M_hat[2 * i, 2 * i] + M_hat[2 * i + 1, 2 * i + 1]
        M_hat[2 * i, 2 * i] -= s / 2
        M_hat[2 * i + 1, 2 * i + 1] -= s / 2
    toc('fitting')

    M_error = M - M_hat
    M_max = max(abs(M).max(), abs(M_hat).max())
    print 'M_error norm = ', (M_error**2).sum()
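The "anti-symmetrize" loops here and in Example No. 51 remove the common mode of each diagonal pair: with $s = M_{2i,2i} + M_{2i+1,2i+1}$, subtracting $s/2$ from both entries forces $M_{2i,2i} + M_{2i+1,2i+1} = 0$ while leaving their difference, and all off-diagonal couplings, untouched.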
Example No. 47
0
V = np.copy(V_init)
# bs, delta = [x for x in bs_call_delta(spots[:, newaxis], k, r,
                                      # np.sqrt(vars)[newaxis, :], t)]

bs = BlackScholesOption(spot=spots[:, np.newaxis],
                        strike=k,
                        interest_rate=r,
                        variance=vars[np.newaxis, :],
                        tenor=t).analytical
utils.tic("Heston Analytical:")
# hss = array([hs_call(spots, k, r, np.sqrt(vars),
             # dt*i, kappa, theta, sigma, rho) for i in range(int(t/dt)+1)])
# hs = hss[-1]
hs = hs_call_vector(spots, k, r, np.sqrt(vars),
                    t, kappa, theta, sigma, rho)
utils.toc()
# Scrub NaNs from the analytical surface and flag it if it blows up well
# beyond the spot grid, since it is only used as a reference solution.
hs[np.isnan(hs)] = 0.0
if max(hs.flat) > spots[-1] * 2:
    BADANALYTICAL = True
    print "Warning: Analytical solution looks like trash."

if len(sys.argv) > 1:
    if sys.argv[1] == '0':
        print "Bail out with arg 0."
        sys.exit()

L1_ = []
R1_ = []
utils.tic("Building As(s):")
print "(Up/Down)wind from:", flip_idx_spot
As_ = utils.nonuniform_complete_coefficients(dss, up_or_down=up_or_down_spot,
Example No. 48
0
      for i in range(len(self.imgBoxes[img])):
        box = map( int, self.imgBoxes[img][i,:].tolist() )
        key = img + ' ' + ' '.join( map(str, box) )
        try:
          score = records[key]
        except KeyError:
          score = -10.0
        self.scores[img][i,fileIdx] = score

  def saveDB(self, outputDir):
    for img in self.imgBoxes.keys():
      data = {'boxes':self.imgBoxes[img], 'scores':self.scores[img]}
      scipy.io.savemat(outputDir+'/'+img+'.mat', data, do_compression=True)
    out = open(outputDir+'/categories.txt','w')
    for c in self.categories:
      out.write(c + '\n')
    out.close()

if __name__ == "__main__":
  params = cu.loadParams('scoresDirectory proposalsFile outputDir')
  cu.mem('Program started')
  lap = tic()
  builder = DBBuilder(params['scoresDirectory'], params['proposalsFile'])
  lap = toc('Proposals loaded', lap)
  cu.mem('DB initialized')
  builder.parseDir()
  lap = toc('Directory parsed', lap)
  cu.mem('All files read')
  builder.saveDB(params['outputDir'])
  lap = toc('Database saved', lap)
  cu.mem('Program ends')
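The lap = toc(message, lap) chaining used throughout assumes toc prints the elapsed time and returns a fresh timestamp, so each call times only the segment since the previous one; unlike the stopwatch sketched earlier, the start time is passed explicitly. A minimal lap timer of that shape (hypothetical stand-ins for the cu helpers):

import time

def tic():
    return time.time()

def toc(message, since):
    now = time.time()
    print('%s %.2fs' % (message, now - since))
    return now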

Example No. 49
0
    return True


##################################
# Organize boxes by source image
#################################
startTime = tic()

images = {}
for s, box in bboxes:
    b = map(int, box[1:]) + [s]
    try:
        images[box[0]].append(b)
    except KeyError:
        images[box[0]] = [b]
lap = toc('Reading boxes file:', startTime)

#################################
# Extract Features
#################################
totalItems = len(bboxes)
del bboxes
layers = {'fc6_neuron_cudanet_out': {'dim': 4096, 'idx': 15}}
batch = 200
taskQueue = Queue.Queue()
p = threading.Thread(target=worker, args=(taskQueue, net, layers, batch))
p.daemon = True
p.start()

print 'Extracting features for', totalItems, 'total images'
for name in images.keys():
Example No. 50
0
  np.savez_compressed(outf,matrix)
  outf.close()

##################################
# Organize boxes by source image
#################################
startTime = tic()

images = {}
for s,box in bboxes:
  b = map(int,box[1:]) + [s]
  try:
    images[ box[0] ].append(b)
  except KeyError:
    images[ box[0] ] = [b]
lap = toc('Reading boxes file:',startTime)

#################################
# Extract Features
#################################
totalItems = len(bboxes)
del(bboxes)
layers = {'fc6_neuron_cudanet_out': {'dim':4096,'idx':15}}
#layers = {'fc6_neuron_cudanet_out': {'dim':4096,'idx':15},'conv3_cudanet_out': {'dim':64896,'idx':9}}

batch = 200

print 'Extracting features for',totalItems,'total images'
for name in images.keys():
  # Check if files already exist
  processed = 0
Example No. 51
0
    dim = 3

    M = np.random.randn(2 * dim, 2 * dim)
    M += M.T.copy()
    for i in np.arange(M.shape[0] / 2):
        s = M[2 * i, 2 * i] + M[2 * i + 1, 2 * i + 1]
        M[2 * i, 2 * i] -= s / 2
        M[2 * i + 1, 2 * i + 1] -= s / 2

    tic("sampling")
    nsamples = 10 ** 4
    burnin = 10 ** 3
    lf_steps = 50
    step_sz = 0.15
    phi, E, diagn = sample_hmc(M, nsamples, burnin, lf_steps, step_sz, diagnostics=True)
    toc("sampling")

    tic("fiting")
    M_hat = fit_model(phi)
    Mneg_hat, Mpos_hat = m2kappa(M_hat)
    # anti-symmetrize diagonal elements for estimation matrix
    for i in np.arange(M_hat.shape[0] / 2):
        s = M_hat[2 * i, 2 * i] + M_hat[2 * i + 1, 2 * i + 1]
        M_hat[2 * i, 2 * i] -= s / 2
        M_hat[2 * i + 1, 2 * i + 1] -= s / 2
    toc("fiting")

    M_error = M - M_hat
    M_max = max(abs(M).max(), abs(M_hat).max())
    print "M_error norm = ", (M_error ** 2).sum()
Example No. 52
0
def mainLoop(modelType, modelArgs, positives, trainingList, featuresDir,
             featuresExt, modelOut, maxNegOverlap, iter):
    pos, posIdx, ari, osi = positives
    startTime = cu.tic()
    if iter == 0:
        ## Random Negatives
        print ' >>> RANDOM NEGATIVES'
        neg, negIdx = learn.getRandomNegs(featuresDir, trainingList,
                                          featuresExt, pos.shape[1],
                                          maxVectorsCache, maxNegativeImages)
        detectionsList = [[x[0], '0.0'] + x[1:] + ['1'] for x in negIdx]
        hards = {'features': np.zeros((0, neg.shape[1])), 'index': []}
        lap = cu.toc(
            'Random negatives matrix (' + str(neg.shape[0]) + ' instances)',
            startTime)
    else:
        ## Mine hard negatives
        print ' >>> MINING HARD NEGATIVES'
        model = det.createDetector(modelType, modelArgs)
        model.load(modelOut + '.' + str(iter - 1))
        detectionsList = detector.detectObjects(
            model, trainingList, featuresDir, featuresExt, 1.0, -10.0
        )  # For RCNN the overlap parameter is 0.3, not 1.0 (no suppression)
        hards = cu.loadMatrixNoCompression(modelOut + '.hards').item()
        lap = cu.toc(
            'Hard negatives matrix (' + str(hards['features'].shape[0]) +
            ' instances)', startTime)

    ## Rank and clean negative detections
    detectionsData = evaluation.loadDetections(detectionsList)
    groundTruth = evaluation.loadGroundTruthAnnotations(posIdx)
    detectionsLog = evaluation.evaluateDetections(
        groundTruth, detectionsData, 0.5,
        allowDuplicates=False)  #,overlapMeasure=det.overlap
    evaluation.computePrecisionRecall(len(posIdx), detectionsLog['tp'],
                                      detectionsLog['fp'], 'tmp.txt')
    evaluation.computePrecAt(detectionsLog['tp'],
                             [20, 50, 100, 200, 300, 400, 500])
    logData = learn.parseRankedDetectionsFile(detectionsLog['log'],
                                              maxNegOverlap,
                                              maxNegativeVectors)
    print ' >>> LOADING HARD NEGATIVES'
    neg, negIdx = learn.loadHardNegativesFromList(featuresDir,
                                                  logData['negExamples'],
                                                  featuresExt, pos.shape[1],
                                                  logData['negTaken'])
    del detectionsList, detectionsData, detectionsLog, logData
    lap = cu.toc(
        'Ranked negatives matrix (' + str(neg.shape[0]) + ' instances)', lap)
    neg = np.concatenate((neg, hards['features']))
    negIdx = negIdx + hards['index']

    ## Learn Detector
    clf = det.createDetector(modelType, modelArgs)
    clf.learn(pos, neg, posIdx, negIdx)
    clf.save(modelOut + '.' + str(iter))
    lap = cu.toc('Classifier learned:', lap)

    ## Keep hard negatives for next iterations
    scores = clf.predict(neg, negIdx)
    hardNegsIdx = np.argsort(scores)
    hardNeg = np.concatenate(
        (hards['features'], neg[hardNegsIdx[-cu.topHards:]]))
    negIdx = hards['index'] + [negIdx[j] for j in hardNegsIdx[-cu.topHards:]]
    print 'Hard negatives:', hardNeg.shape[0]
    hards = {'features': hardNeg, 'index': negIdx}
    cu.saveMatrixNoCompression({
        'features': hardNeg,
        'index': negIdx
    }, modelOut + '.hards')

    print ' ** Iteration', iter, 'done'
    return {
        'detector': clf,
        'pos': pos,
        'posIdx': posIdx,
        'neg': neg,
        'negIdx': negIdx
    }
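mainLoop is one round of hard-negative mining: iteration 0 trains against random negatives, and every later iteration runs the previous detector over the training set, ranks its false positives, and keeps the highest-scoring ones for the next round. Stripped of the file and evaluation plumbing, the outer pattern looks roughly like this (every callable below is a stand-in, not the actual cu/learn/det API):

def mine_hard_negatives(pos, sample_random_negs, detect_false_positives,
                        fit, score, n_iters, top_k):
    """Generic hard-negative mining loop (illustrative sketch)."""
    model, hards = None, []
    for it in range(n_iters):
        negs = sample_random_negs() if it == 0 else detect_false_positives(model)
        negs = negs + hards                 # carry hard examples forward
        model = fit(pos, negs)
        # the negatives the new model still scores highest are the hard ones
        negs.sort(key=lambda x: score(model, x))
        hards = negs[-top_k:]
    return model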