Code example #1
def _main(nome_do_video, nome_do_arquivo_de_saida, modo_selecionado,
          parametro):

    show = lp.getInJson('tracker', 'show')
    opts = configParams()
    opts = getOpts(opts)
    # operating mode for the template update (passed on to mediaMovelGaussiana)
    mode = int(modo_selecionado)
    caminhoDataset = lp.getInJson('tracker', 'datasetPath')
    caminhoVideo = os.path.join(caminhoDataset, nome_do_video)
    caminhoLog = os.path.join(caminhoVideo, '__log__')
    nome_log = nome_do_arquivo_de_saida
    FRAMES_TO_ACUMULATE_BEFORE_FEEDBACK = int(parametro)

    # NETWORK 1
    exemplarOp = tf.placeholder(
        tf.float32, [1, opts['exemplarSize'], opts['exemplarSize'], 3])
    instanceOp = tf.placeholder(
        tf.float32,
        [opts['numScale'], opts['instanceSize'], opts['instanceSize'], 3])
    exemplarOpBak = tf.placeholder(tf.float32, [
        opts['trainBatchSize'], opts['exemplarSize'], opts['exemplarSize'], 3
    ])
    instanceOpBak = tf.placeholder(tf.float32, [
        opts['trainBatchSize'], opts['instanceSize'], opts['instanceSize'], 3
    ])
    isTrainingOp = tf.convert_to_tensor(False,
                                        dtype='bool',
                                        name='is_training')
    sn = SiameseNet()
    scoreOpBak = sn.buildTrainNetwork(exemplarOpBak,
                                      instanceOpBak,
                                      opts,
                                      isTraining=False)
    saver = tf.train.Saver()
    #writer = tf.summary.FileWriter(opts['summaryFile'])
    sess = tf.Session()
    sess2 = tf.Session()
    saver.restore(sess, opts['modelName'])
    saver.restore(sess2, opts['modelName'])
    zFeatOp = sn.buildExemplarSubNetwork(exemplarOp, opts, isTrainingOp)

    # NETWORK 2
    exemplarOp2 = tf.placeholder(
        tf.float32, [1, opts['exemplarSize'], opts['exemplarSize'], 3])
    instanceOp2 = tf.placeholder(
        tf.float32,
        [opts['numScale'], opts['instanceSize'], opts['instanceSize'], 3])
    exemplarOpBak2 = tf.placeholder(tf.float32, [
        opts['trainBatchSize'], opts['exemplarSize'], opts['exemplarSize'], 3
    ])
    instanceOpBak2 = tf.placeholder(tf.float32, [
        opts['trainBatchSize'], opts['instanceSize'], opts['instanceSize'], 3
    ])
    isTrainingOp2 = tf.convert_to_tensor(False,
                                         dtype='bool',
                                         name='is_training')
    sn2 = SiameseNet()
    #	scoreOpBak2 = sn2.buildTrainNetwork(exemplarOpBak2, instanceOpBak2, opts, isTraining=False)
    saver2 = tf.train.Saver()
    #writer2 = tf.summary.FileWriter(opts['summaryFile'])
    sess2 = tf.Session()
    saver2.restore(sess2, opts['modelName'])
    zFeatOp2 = sn2.buildExemplarSubNetwork(exemplarOp2, opts, isTrainingOp2)
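    # NETWORK 1 (sess/zFeatOp) recomputes exemplar features every frame for the
    # adaptive template; NETWORK 2 (sess2/zFeatOp2) only extracts the first-frame
    # ("original") exemplar features used while feedback is still accumulating.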

    #imgs, targetPosition, targetSize = loadVideoInfo(caminhoDataset, nome_do_video)
    imgFiles, targetPosition, targetSize = loadVideoInfo(
        caminhoDataset, nome_do_video)

    nImgs = len(imgFiles)
    #imgs_pil =  [Image.fromarray(np.uint8(img)) for img in imgs]

    im = get_next_frame(imgFiles, POSICAO_PRIMEIRO_FRAME)

    if (im.shape[-1] == 1):
        tmp = np.zeros([im.shape[0], im.shape[1], 3], dtype=np.float32)
        tmp[:, :, 0] = tmp[:, :, 1] = tmp[:, :, 2] = np.squeeze(im)
        im = tmp

    avgChans = np.mean(
        im, axis=(0, 1)
    )  # [np.mean(np.mean(img[:, :, 0])), np.mean(np.mean(img[:, :, 1])), np.mean(np.mean(img[:, :, 2]))]
    wcz = targetSize[1] + opts['contextAmount'] * np.sum(targetSize)
    hcz = targetSize[0] + opts['contextAmount'] * np.sum(targetSize)
    sz = np.sqrt(wcz * hcz)
    scalez = opts['exemplarSize'] / sz
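    # sz: side of the context-padded square crop around the target;
    # scalez rescales that crop to the network's exemplar size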

    zCrop, _ = getSubWinTracking(im, targetPosition,
                                 (opts['exemplarSize'], opts['exemplarSize']),
                                 (np.around(sz), np.around(sz)), avgChans)
    zCrop2, _ = getSubWinTracking(im, targetPosition,
                                  (opts['exemplarSize'], opts['exemplarSize']),
                                  (np.around(sz), np.around(sz)), avgChans)

    if opts['subMean']:
        pass

    dSearch = (opts['instanceSize'] - opts['exemplarSize']) / 2
    pad = dSearch / scalez
    sx = sz + 2 * pad
    minSx = 0.2 * sx
    maxSx = 5.0 * sx
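    # keep the search-region side within [0.2, 5.0] x its initial value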
    winSz = opts['scoreSize'] * opts['responseUp']
    if opts['windowing'] == 'cosine':

        hann = np.hanning(winSz).reshape(winSz, 1)
        window = hann.dot(hann.T)
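        # (the 2-D cosine window above is the outer product of two 1-D Hann windows)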
    elif opts['windowing'] == 'uniform':
        window = np.ones((winSz, winSz), dtype=np.float32)

    window = window / np.sum(window)
    scales = np.array([
        opts['scaleStep']**i
        for i in range(int(np.ceil(opts['numScale'] / 2.0) - opts['numScale']),
                       int(np.floor(opts['numScale'] / 2.0) + 1))
    ])
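    # scale-pyramid exponents centred on zero, e.g. [-1, 0, 1] for numScale == 3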

    # NETWORK 1
    zCrop = np.expand_dims(zCrop, axis=0)
    zFeat = sess.run(zFeatOp, feed_dict={exemplarOp: zCrop})
    zFeat = np.transpose(zFeat, [1, 2, 3, 0])
    template = tf.constant(zFeat, dtype=tf.float32)
    oldTemplate = tf.constant(zFeat, dtype=tf.float32)
    scoreOp = sn.buildInferenceNetwork(instanceOp, template, opts,
                                       isTrainingOp)
    #writer.add_graph(sess.graph)

    # NETWORK 2
    zCrop_original = np.array(zCrop)
    zFeat_original = sess2.run(zFeatOp2,
                               feed_dict={exemplarOp2: zCrop_original})
    zFeat_original = np.transpose(zFeat_original, [1, 2, 3, 0])
    template_original = tf.constant(zFeat_original, dtype=tf.float32)
    #template = np.array(template_original)
    template = tf.identity(template_original)
    oldTemplate = tf.identity(template_original)
    template_acumulado = np.array(template)
    scoreOp_original = sn.buildInferenceNetwork(instanceOp, template_original,
                                                opts, isTrainingOp)
    #writer2.add_graph(sess2.graph)

    teste1 = tf.constant(zFeat, dtype=tf.float32)
    teste2 = tf.Session().run(teste1)
    teste3 = tf.constant(teste2, dtype=tf.float32)

    #assert 2 == 1

    tic = time.time()
    ltrb = []

    superDescritor = SuperTemplate()
    superDescritor.addInstance(np.array(zFeat))
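    # the SuperTemplate accumulates exemplar features over time; the matching
    # template is rebuilt from this history at every frame (see the loop below)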

    print('zfeat:', zFeat[0, 0, -10, 0])

    for frame in range(POSICAO_PRIMEIRO_FRAME, nImgs):

        im = get_next_frame(imgFiles, frame)

        print(('frame ' + str(frame + 1) + ' / ' + str(nImgs)).center(80, '*'))

        if frame > POSICAO_PRIMEIRO_FRAME:

            zCrop, _ = getSubWinTracking(
                im, targetPosition,
                (opts['exemplarSize'], opts['exemplarSize']),
                (np.around(sz), np.around(sz)), avgChans)
            zCrop = np.expand_dims(zCrop, axis=0)
            zFeat = sess.run(zFeatOp, feed_dict={exemplarOp: zCrop})
            zFeat = np.transpose(zFeat, [1, 2, 3, 0])
            zFeat.reshape(1, NUMBER_OF_EXEMPLAR_DESCRIPTOR,
                          NUMBER_OF_EXEMPLAR_DESCRIPTOR,
                          SIAMESE_DESCRIPTOR_DIMENSION)

            if frame < FRAMES_TO_ACUMULATE_BEFORE_FEEDBACK:
                superDescritor.addInstance(np.array(zFeat_original))
            else:
                superDescritor.addInstance(np.array(zFeat))

            if (im.shape[-1] == 1):  # if the image is grayscale
                tmp = np.zeros([im.shape[0], im.shape[1], 3], dtype=np.float32)
                tmp[:, :, 0] = tmp[:, :, 1] = tmp[:, :, 2] = np.squeeze(im)
                im = tmp
            scaledInstance = sx * scales
            scaledTarget = np.array([targetSize * scale for scale in scales])
            xCrops = makeScalePyramid(im, targetPosition, scaledInstance,
                                      opts['instanceSize'], avgChans, None,
                                      opts)
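            # fuse the accumulated exemplar features into this frame's template
            # (Gaussian moving average selected by `mode`); a short-lived session
            # materializes the tensor before it is wrapped back into a constant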

            template = superDescritor.mediaMovelGaussiana(size=frame,
                                                          mode=mode)

            with tf.Session() as sess1:
                template = sess1.run(template)
                template = tf.constant(template, dtype=tf.float32)

            #template_espacial = spatialTemplate (targetPosition,im, opts, sz, avgChans,sess,zFeatOp,exemplarOp,FRAMES_COM_MEDIA_ESPACIAL,amplitude = 0, cumulative = False, adaptative = False )
            #template = superDescritor.cummulativeTemplate()
            #template = superDescritor.progressiveTemplate()
            #template = superDescritor.nShotTemplate(3)
            #

            #template = template_original

            # adaptive filter just below:
            #template = filtroAdaptativo(template,zFeat,parametro)
            # ~adaptive filter

            scoreOp = sn.buildInferenceNetwork(instanceOp, template, opts,
                                               isTrainingOp)
            score = sess.run(scoreOp, feed_dict={instanceOp: xCrops})
            #sio.savemat('score.mat', {'score': score})
            newTargetPosition, newScale = trackerEval(score, round(sx),
                                                      targetPosition, window,
                                                      opts)
            targetPosition = newTargetPosition
            sx = max(
                minSx,
                min(maxSx, (1 - opts['scaleLr']) * sx +
                    opts['scaleLr'] * scaledInstance[newScale]))
            targetSize = (1 - opts['scaleLr']) * targetSize + opts[
                'scaleLr'] * scaledTarget[newScale]

        else:
            pass

        rectPosition = targetPosition - targetSize / 2.
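        # centre/size -> integer corner points; [::-1] swaps (row, col) to (x, y)
        # so the corners match OpenCV's drawing convention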
        tl = tuple(np.round(rectPosition).astype(int)[::-1])
        br = tuple(np.round(rectPosition + targetSize).astype(int)[::-1])
        if show:  # plot only if it is in a desktop that allows you to watch the video
            imDraw = im.astype(np.uint8)
            cv2.putText(imDraw,
                        str(frame + 1) + '/' + str(nImgs), (0, 25),
                        cv2.FONT_HERSHEY_DUPLEX, 1, (0, 255, 255), 2)
            cv2.rectangle(imDraw, tl, br, (0, 255, 255), thickness=3)
            cv2.imshow("tracking - siamese", imDraw)
            cv2.waitKey(1)
        ltrb.append(list(tl) + list(br))
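        # ltrb collects one [left, top, right, bottom] box per frame; the log
        # file below stores them as comma-separated lines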

    with open(os.path.join(caminhoLog, nome_log), 'w') as file:
        linhas = []
        for i in ltrb:
            linha = ''
            for cont, coord in enumerate(i):
                if cont == 3:
                    linha = linha + str(coord) + '\n'
                else:
                    linha = linha + str(coord) + ','
            linhas.append(linha)
        for i in linhas:
            file.write(i)
    print(time.time() - tic)
    return
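The SuperTemplate class used above is defined elsewhere in the project. Below is a minimal sketch of the behaviour code example #1 relies on, assuming only what the calls show (addInstance stores exemplar feature maps, mediaMovelGaussiana returns a TensorFlow tensor); the weighting scheme, the sigma value and the fact that size/mode are ignored here are assumptions, not the project's actual implementation.

import numpy as np
import tensorflow as tf


class SuperTemplate:
    """Accumulates exemplar feature maps (zFeat) and fuses them into one template."""

    def __init__(self):
        self.instances = []

    def addInstance(self, zFeat):
        # store a copy of one exemplar feature map (shape [h, w, c, 1] in the tracker)
        self.instances.append(np.array(zFeat, dtype=np.float32))

    def mediaMovelGaussiana(self, size, mode=0, sigma=5.0):
        # Gaussian-weighted moving average over the stored maps: the most recent
        # frames receive the largest weights. `size` and `mode` mirror the tracker's
        # call signature but are ignored in this simplified sketch.
        n = len(self.instances)
        ages = np.arange(n - 1, -1, -1, dtype=np.float32)  # 0 = most recent frame
        weights = np.exp(-0.5 * (ages / sigma) ** 2)
        weights /= weights.sum()
        fused = np.tensordot(weights, np.stack(self.instances, axis=0), axes=1)
        # the tracker evaluates the returned tensor with sess.run(), so return a constant
        return tf.constant(fused, dtype=tf.float32)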
Code example #2
def overall(dim, TESTE_MODE=False):

    if TESTE_MODE:
        dim = 25
        print("A dimensao utilizada para teste sera: ", dim)
        input("Verifique as condiçoes no inicio do codigo")

    else:
        dim = int(dim)

    # Variables read from the parameter.json file
    #--------------------------------------------------------------------
    PATH_GT = lp.getInJson('tracker', 'gtPath')
    PATH_GT_ESPACIAL = lp.getInJson('tracker', 'gtEspacialPath')
    PATH_SIAMESE = lp.getInJson('tracker', 'siamesePath')
    PATH_SIAMESE_ESPACIAL = lp.getInJson('tracker', 'siameseEspacialPath')
    SHOW = lp.getInJson('tracker', 'show')

    #--------------------------------------------------------------------

    def getPathPickle(tipo):
        if tipo == 'gt':
            caminhoPickle = PATH_GT
        if tipo == 'siam':
            caminhoPickle = PATH_SIAMESE
        if tipo == 'gt_espacial':
            caminhoPickle = PATH_GT_ESPACIAL
        if tipo == 'siam_espacial':
            caminhoPickle = PATH_SIAMESE_ESPACIAL
        return caminhoPickle

    def getSignal(tipo, video, xx, yy):

        caminhoPickle = getPathPickle(tipo)
        fullPath = os.path.join(caminhoPickle, tipo + '_' + video + '.pickle')
        file_siamese = open(fullPath, 'rb')
        list_zFeat = np.array(pickle.load(file_siamese))
        file_siamese.close()
        media = []
        pontos_de_recorte = [
            i for i in range(0, int(list_zFeat.shape[0] +
                                    1), int(list_zFeat.shape[0] / 9))
        ]
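        # (the cut points above split the feature sequence into roughly 9 equal temporal segments)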
        MEDIA_ESPACIAL = 1
        for i in range(MEDIA_ESPACIAL):
            media.append(list_zFeat[
                pontos_de_recorte[i]:pontos_de_recorte[i + 1], :, :, :, :])
        media = np.array(media)
        result = np.mean(media, axis=0)
        acc = np.zeros([int(list_zFeat.shape[0] / 9), 256])
        for i in range(xx, xx + 1):
            for j in range(yy, yy + 1):
                acc[:, :] = result[:, xx, yy, :, 0] + acc[:, :]
        return acc

    def getSignal2(tipo, video, xx, yy):

        caminhoPickle = getPathPickle(tipo)
        fullPath = os.path.join(caminhoPickle, video + '.pickle')
        file_siamese = open(fullPath, 'rb')
        list_zFeat = np.array(pickle.load(file_siamese))
        file_siamese.close()
        singleZFeat = np.zeros([int(list_zFeat.shape[0]), 256])
        for i in range(xx, xx + 1):
            for j in range(yy, yy + 1):
                singleZFeat[:, :] = list_zFeat[:, xx, yy, :, 0]

        return singleZFeat  # over t and d, but only a single x and y

    filtro = np.zeros(SIZE_FILTER) / SIZE_FILTER

    def calcularFFT(sign, escala='dB'):
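        # magnitude spectrum of `sign`; in 'dB' mode it is given relative to the
        # peak magnitude of the raw FFT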
        gamma = abs(10**-12)  # avoids log(0)
        if escala == 'dB':
            dividendo = abs(np.fft.fftshift(np.fft.fft(sign))) + gamma
            divisor = max(abs(np.fft.fft(sign))) + gamma
            fft = 10 * np.log10(dividendo / divisor)
            return fft
        else:
            fft = abs(np.fft.fftshift(np.fft.fft(sign)))
            return fft

    def plotar(sinais, ffts):

        sinais = list(sinais)
        ffts = list(ffts)
        nSinais = len(sinais)
        for cont in range(len(sinais)):
            # plot the signal and its FFT
            plt.subplot(nSinais, 2, 2 * cont + 1)
            plt.plot(sinais[cont])
            plt.subplot(nSinais, 2, 2 * cont + 2)
            plt.plot(ffts[cont])
        plt.show()

    def erroQuadraticoMedio(sinal1, sinal2):
        sinal1 = np.array(sinal1)
        sinal2 = np.array(sinal2)
        erro = sinal1 - sinal2
        erroQuadratico = erro**2
        MSE = np.mean(erroQuadratico)
        return MSE

    def alinharSinais(sinalMaior, sinalMenor):
        '''
        Aligns the longer signal with the shorter one by trimming the longer signal.
        *The mean squared error should be 0 when the impulse filter is found by an optimization process.
        '''
        # safety cast

        #print("first debug in function: ", sinalMaior.shape)
        sinalMenor = np.array(sinalMenor)
        sinalMaior = np.array(sinalMaior)
        '''
		diferencaDimensional = sinalMaior.shape[0] - sinalMenor.shape[0]
		sinalMaior = sinalMaior[: sinalMaior.shape[0] - diferencaDimensional]
		sinalMenor = sinalMenor.ravel(-1)
		sinalMaior = sinalMaior.ravel(-1)
		return sinalMaior,sinalMenor
		'''

        dim_N = np.max(sinalMenor.shape)
        dim_M = np.max(sinalMaior.shape)

        #print("dim_N: ",dim_N, " dim_M: ", dim_M)

        sinalMaior = sinalMaior[dim_M - dim_N:dim_M]
        #print("debug in function: sinal maior", sinalMaior.shape, " sinal menor: ", sinalMenor.shape )
        return sinalMaior, sinalMenor

    '''
	**********************************************************************************************
	**********************************************************************************************
	******************************************* PART 2 ******************************************
	**********************************************************************************************
	**********************************************************************************************
	'''

    import random

    from deap import base
    from deap import creator
    from deap import tools

    GAMMA = 10**-10
    MENOR_AMPLITUDE = -100
    MAIOR_AMPLITUDE = 100
    PROBABILIDADE_MUTACAO_GENE = 1 / SIZE_FILTER
    PROBABILIDADE_MUTACAO_INDIVIDUO = 0.9
    PROBABILIDADE_CROSS_OVER = 0.95
    POPULACAO = 5 * SIZE_FILTER
    PORCENTAGEM = 10
    TAMANHO_DO_TORNEIO = round(POPULACAO * PORCENTAGEM / 100)
    MAX_ITE = 300
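    # GA settings above: integer filter taps in [MENOR_AMPLITUDE, MAIOR_AMPLITUDE],
    # tournament of ~10% of the population, at most MAX_ITE generations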

    creator.create("FitnessMin", base.Fitness, weights=(-1.0, ))
    creator.create("Individual", list, fitness=creator.FitnessMin)

    toolbox = base.Toolbox()
    # Attribute generator
    toolbox.register(
        "attr_int", random.randint, MENOR_AMPLITUDE, MAIOR_AMPLITUDE
    )  # each filter tap is an integer in [MENOR_AMPLITUDE, MAIOR_AMPLITUDE]
    # Structure initializers
    toolbox.register("individual", tools.initRepeat, creator.Individual,
                     toolbox.attr_int,
                     SIZE_FILTER)  # the filter has SIZE_FILTER taps
    toolbox.register("population", tools.initRepeat, list, toolbox.individual)

    sinal_siamese_master = []
    sinal_gt_master = []

    sinal_siamese_master_media = []

    for video in range(len(listaDeVideos)):
        sinal_siamese_master.append([])
        sinal_gt_master.append([])
        sinal_siamese_master_media.append([])
        for xx in range(6):
            sinal_siamese_master[video].append([])
            sinal_gt_master[video].append([])
            sinal_siamese_master_media[video].append([])
            for yy in range(6):
                sinal_siamese_master[video][xx].append([])
                sinal_gt_master[video][xx].append([])
                sinal_siamese_master_media[video][xx].append([])

    for numero_video, video in enumerate(listaDeVideos):
        for xx in range(6):
            for yy in range(6):

                fullSignalSiam = getSignal2(ORIGEM, video, xx, yy)
                sinal_siamese = np.ravel(fullSignalSiam[:, dim])
                sinal_siamese_master[numero_video][xx][yy] = np.array(
                    sinal_siamese)
                if video in listaDeVideos1:  # add noise only to one class of videos (e.g., only the good ones)
                    sinal_siamese_master[numero_video][xx][yy] = np.array(
                        sinal_siamese) + (np.random.rand(
                            len(sinal_siamese_master[numero_video][xx][yy])) -
                                          0.5) * AMPLITUDE_RUIDO
                else:
                    sinal_siamese_master[numero_video][xx][yy] = np.array(
                        sinal_siamese)
                sinal_siamese_master_media[numero_video][xx][yy] = np.mean(
                    np.array(sinal_siamese[0:TAMANHO_MEDIA_INICIAL]))

                fullSignalGt = getSignal2(ALVO, video, xx, yy)
                sinal_gt = np.ravel(fullSignalGt[:, dim])
                sinal_gt_master[numero_video][xx][yy] = np.array(sinal_gt)
    '''
	sen_exp = np.sin([ (i/4) for i in range(196)]) * np.array([ (np.exp(-0.02*i)) for i in range(1,197)])
	print(sen_exp)
	sinal_siamese_master = np.array( sen_exp + np.sin([ i/7 for i in range(196)]))
	sinal_gt_master = np.sin([ (i/4) for i in range(196)])
	'''

    #print('done')
    #print(sinal_gt_master[0][0])

    def evalFilter(individual):
        individual = [float(i) for i in individual]
        filtro = np.array(individual) / sum(
            [abs(i) + GAMMA for i in individual])
        corr = signal.correlate(sinal_siamese_master, filtro, mode='same')
        #sinal,corr = alinharSinais(sinal_siamese_master,corr)
        #MSE = erroQuadraticoMedio(sinal,corr)
        #return MSE,
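    # evalFilter above never returns a fitness and is not registered; evalFilter2
    # below is the fitness actually used: total MSE between the filtered siamese
    # signal and the ground-truth signal over every video and 6x6 grid cell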

    def evalFilter2(individual):
        individual = [float(i) for i in individual]

        filtro = np.array(individual) / sum(
            [abs(i) + GAMMA for i in individual])
        MSE = 0

        for video in range(len(listaDeVideos)):
            for xx in range(6):
                for yy in range(6):

                    #corr = signal.convolve(sinal_siamese_master[xx][yy],filtro,  mode='full') # change to correlate!
                    #sinal,corr = alinharSinais(corr,sinal_gt_master[xx][yy])
                    sigRef = sinal_siamese_master[video][xx][yy]
                    if (MEDIA_INICIAL_DEFINED):
                        sigRef = sigRef * 0.5 + sinal_siamese_master_media[
                            video][xx][yy] * 0.5
                    if (GAUSSIAN_DEFINED):
                        sigRef = np.correlate(sigRef,
                                              gaussianFilter,
                                              mode='same')
                    corr = np.correlate(sigRef, filtro,
                                        mode='full')  # full correlation; aligned with the GT signal below
                    sinal, corr = alinharSinais(corr,
                                                sinal_gt_master[video][xx][yy])
                    #print("sinal e corr: ",sinal.shape, corr.shape)
                    MSE += erroQuadraticoMedio(corr, sinal)
        return MSE,

    '''
    # set up for the real case
    '''

    toolbox.register("evaluate", evalFilter2)
    toolbox.register("mate", tools.cxTwoPoint)
    toolbox.register(
        "mutate",
        tools.mutUniformInt,
        indpb=PROBABILIDADE_MUTACAO_GENE,
        low=MENOR_AMPLITUDE,
        up=MAIOR_AMPLITUDE)  #, MENOR_AMPLITUDE, MAIOR_AMPLITUDE,indpb=0.05 )
    toolbox.register("select",
                     tools.selTournament,
                     tournsize=TAMANHO_DO_TORNEIO)
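    # registered GA operators: two-point crossover, uniform integer mutation and
    # tournament selection with tournsize = 10% of the population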

    hof = tools.HallOfFame(10)

    def main():
        pop = toolbox.population(
            n=POPULACAO)  # initial population of POPULACAO individuals

        # Evaluate the entire population
        fitnesses = list(map(toolbox.evaluate, pop))

        for ind, fit in zip(pop, fitnesses):

            ind.fitness.values = fit

        CXPB = PROBABILIDADE_CROSS_OVER
        MUTPB = PROBABILIDADE_MUTACAO_INDIVIDUO

        # Extract all the fitnesses in the population
        fits = [ind.fitness.values[0] for ind in pop]

        # Variable keeping track of the number of generations
        g = 0

        # Begin the evolution
        while g < MAX_ITE:
            # A new generation
            g = g + 1
            print("-- Generation %i --" % g)

            # Select the next generation individuals
            offspring = toolbox.select(pop, len(pop))
            '''
            #print("Number of individuals: ", len(offspring))
            #print(offspring)
            #input('press any key')
            '''
            # Clone the selected individuals
            offspring = list(map(toolbox.clone, offspring))

            # Apply crossover and mutation on the offspring
            for child1, child2 in zip(offspring[::2], offspring[1::2]):
                if random.random() < CXPB:
                    toolbox.mate(child1, child2)
                    #print('crossed individuals: ', (child1, child2))
                    del child1.fitness.values
                    del child2.fitness.values

            for mutant in offspring:
                if random.random() < MUTPB:
                    toolbox.mutate(mutant)
                    del mutant.fitness.values

            # Evaluate the individuals with an invalid fitness
            invalid_ind = [ind for ind in offspring if not ind.fitness.valid]

            fitnesses = map(toolbox.evaluate, invalid_ind)
            for ind, fit in zip(invalid_ind, fitnesses):
                ind.fitness.values = fit
            #print("populacao de pais: ", pop)
            hof.update(pop)
            pop[:] = offspring

            # Gather all the fitnesses in one list and print the stats
            fits = [ind.fitness.values[0] for ind in pop]

            print(hof[0])

            length = len(pop)
            mean = sum(fits) / length
            sum2 = sum(x * x for x in fits)
            std = abs(sum2 / length - mean**2)**0.5

            print("  Min %s" % min(fits))
            print("  Max %s" % max(fits))
            print("  Avg %s" % mean)
            print("  Std %s" % std)
        return hof[0]

    vet = main()

    fileName = 'version.json'
    KEY = 'version'

    with open(fileName) as file:
        data = json.load(file)
        print("data value: ", data[KEY])

    arquivo = os.path.join(lp.getInJson("tracker", "filterFolder"),
                           'filtro_v' + str(data[KEY]),
                           "filtro_" + str(dim) + ".pickle")
    pickle_var = open(arquivo, "wb")
    pickle.dump(np.array(vet), pickle_var)
    pickle_var.close()
    return np.array(vet)
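A small usage sketch for the filter that overall(dim) stores above. The file layout (filtro_v<version>/filtro_<dim>.pickle), the abs-sum normalization and the full-mode correlation mirror the code above; the helper names themselves are illustrative, not part of the project.

import os
import pickle

import numpy as np


def load_filter(filter_folder, version, dim):
    # read the GA result written by overall(): filtro_v<version>/filtro_<dim>.pickle
    path = os.path.join(filter_folder, 'filtro_v' + str(version),
                        'filtro_' + str(dim) + '.pickle')
    with open(path, 'rb') as f:
        coeffs = np.array(pickle.load(f), dtype=np.float64)
    # evalFilter2 normalizes by the sum of absolute values before correlating
    return coeffs / (np.abs(coeffs).sum() + 1e-10)


def apply_filter(sig, coeffs):
    # same operation used inside evalFilter2: full correlation, then keep the
    # last len(sig) samples (what alinharSinais does)
    sig = np.asarray(sig, dtype=np.float64)
    corr = np.correlate(sig, coeffs, mode='full')
    return corr[corr.shape[0] - sig.shape[0]:]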
Code example #3
    with open(os.path.join(caminhoLog, nome_log), 'w') as file:
        linhas = []
        for i in ltrb:
            linha = ''
            for cont, coord in enumerate(i):
                if cont == 3:
                    linha = linha + str(coord) + '\n'
                else:
                    linha = linha + str(coord) + ','
            linhas.append(linha)
        for i in linhas:
            file.write(i)
    print(time.time() - tic)
    return


if __name__ == '__main__':

    args = _get_Args()
    listCustom = [args.video, args.nomeSaida, args.modo, args.parametro]
    listDefault = [
        lp.getInJson('process', 'video_teste'),
        lp.getInJson('process', 'nome_saida'), 0,
        lp.getInJson('process', 'parametro')
    ]
    listaArgs = [
        argumentoDefault if argumentoCustom == None else argumentoCustom
        for argumentoCustom, argumentoDefault in zip(listCustom, listDefault)
    ]  # fall back to the default argument when no custom argument is passed
    _main(listaArgs[0], listaArgs[1], listaArgs[2], listaArgs[3])
Code example #4
import os
import multiprocessing
import math
import localParameters as lp


NUM_THREAD 			= int(lp.getInJson('datasetRunner','threads'))
LISTA_DE_PARAMETROS = lp.getInJson('datasetRunner','parametros')
CAMINHO_VOT_2015 	= str(lp.getInJson('tracker','datasetPath'))
PATH_SCRIPT 		= str(lp.getInJson('tracker','trackerPath'))
NOME_ARQUIVO_SAIDA 	= str(lp.getInJson('process', 'nome_saida'))
BASH_PYTHON 		= str(lp.getInJson('sistema','python'))
LOG_FOLDER 			= lp.getInJson('tracker','log_folder')
LISTA_DE_MODOS		= lp.getInJson('datasetRunner', 'mode')
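# get_list_videos: dataset folders whose log folder does not yet contain the
# output file for this parameter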

def get_list_videos(parametro):
	listVideos = []
	for i in os.listdir(CAMINHO_VOT_2015):
		if(not i.startswith('_')) and (os.path.isdir(os.path.join(CAMINHO_VOT_2015,i))):
			if(not ((NOME_ARQUIVO_SAIDA+str(parametro)) in os.listdir(os.path.join(CAMINHO_VOT_2015,i,LOG_FOLDER)))):
				listVideos.append(i)
	listVideos.sort()
	return listVideos

def get_new_list_video(listVideos, n_partes):
	new_list_videos = []
	aux = []
	cont = 0

	for video in listVideos:
		if cont == math.ceil(len(listVideos) / n_partes):
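			# assumed continuation (the snippet is cut off here): close the current
			# chunk and start a new one, so listVideos is split into n_partes parts
			new_list_videos.append(aux)
			aux = []
			cont = 0
		aux.append(video)
		cont += 1
	if aux:
		new_list_videos.append(aux)
	return new_list_videos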
Code example #5
	
	x = str(x)
	k = tl.overall(x)
	print(k)
	return k	


if __name__ == '__main__':
	fileName = 'version.json'
	KEY = 'version'

	with open(fileName) as file:
		data = json.load(file)
	data[KEY] += 1

	os.mkdir(os.path.join(lp.getInJson("tracker","filterFolder"),'filtro_v'+str(data['version'])))

	with open(fileName,'w') as file:
		json.dump(data,file)

	N_THREADS = lp.getInJson("tracker","gaNumProcess")
	N_FILTROS = 256

	#input("Verifique as condiçoes no inicio do codigo")
	p = Pool(N_THREADS)
	#print('numero de threads e: ', N_THREADS)
	listaDim = [i for i in range(0,N_FILTROS,N_THREADS)]
	print(listaDim)
	
	for i in listaDim:
		listaExec = []
Code example #6
******************************************* PART 1 ******************************************
**********************************************************************************************
**********************************************************************************************
'''

if TESTE_MODE:
    dim = 25
    print("A dimensao utilizada para teste sera: ", dim)
    input("Verifique as condiçoes no inicio do codigo")

else:
    dim = int(dim)

# Variables read from the parameter.json file
#--------------------------------------------------------------------
PATH_GT = lp.getInJson('tracker', 'gtPath')
PATH_GT_ESPACIAL = lp.getInJson('tracker', 'gtEspacialPath')
PATH_SIAMESE = lp.getInJson('tracker', 'siamesePath')
PATH_SIAMESE_ESPACIAL = lp.getInJson('tracker', 'siameseEspacialPath')
SHOW = lp.getInJson('tracker', 'show')
#--------------------------------------------------------------------


def getPathPickle(tipo):
    if tipo == 'gt':
        caminhoPickle = PATH_GT
    if tipo == 'siam':
        caminhoPickle = PATH_SIAMESE
    if tipo == 'gt_espacial':
        caminhoPickle = PATH_GT_ESPACIAL
    if tipo == 'siam_espacial':
Code example #7
# Suofei ZHANG, 2017.

import tensorflow as tf
from parameters import configParams
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.training import moving_averages
import time
import localParameters as lp

MOVING_AVERAGE_DECAY = 0  # 0.9997; only the MATLAB siamese-net uses 0 here. Other TensorFlow projects all use 0.999, and judging from further documentation 0.999 is probably the right value, since TensorFlow applies this parameter as 1 - decay.
UPDATE_OPS_COLLECTION = 'sf_update_ops'
PRINT_SIAMESE_LOG = lp.getInJson('process','print_siamese_log')

class SiameseNet:
    learningRates = None

    def __init__(self):
        self.learningRates = {}
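    # The builder methods below re-enter the shared 'siamese' variable scope with
    # reuse_variables(), so every branch shares the same convolutional weights.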

    def buildExemplarSubNetwork(self, exemplar, opts, isTrainingOp, branchType="original"):
        with tf.variable_scope('siamese') as scope:
            scope.reuse_variables()
            score = self.buildBranch(exemplar, opts, isTrainingOp, branchType=branchType)

        return score

    def buildInferenceNetwork(self, instance, zFeat, opts, isTrainingOp, branchType="original"):

        with tf.variable_scope('siamese') as scope:
            scope.reuse_variables()
            
Code example #8
File: test.py  Project: hugolimachaves/superSiamese
import json
import socket
import localParameters as lp

NUM_THREAD = lp.getInJson('datasetRunner', 'threads')
LISTA_DE_PARAMETROS = lp.getInJson('datasetRunner', 'parametros')
CAMINHO_VOT_2015 = lp.getInJson('sistema', 'datasetPath')
PATH_SCRIPT = lp.getInJson('tracker', 'trackerPath')
NOME_ARQUIVO_SAIDA = lp.getInJson('datasetRunner', 'nome_saida')
'''
j = lp.getInJson("sistema","datasetPath")

def readParameters():
	with open('parameter.json') as file:
		k = json.load(file)
	return k


k = readParameters()

print('sistema' in k[1].keys())
'''
'''
def readParameters():
	with open("parameter.json") as file:
		k = json.load(file)


	for elemento in k:
		
		if elemento['sistema']['computador'] ==  socket.gethostname():