Example #1
def do(searcher, problem):
    if sys.stdin.isatty():
        raw_input('Press Enter to run %s on %s' %
                  (name(searcher), name(problem)))
    else:
        print('Running %s on %s' % (name(searcher), name(problem)))
    p = InstrumentedProblem(problem)
    start = time.time()
    solution = searcher(p)
    elapsed = time.time() - start
    print('Elapsed time:', elapsed, 'seconds')
    if isinstance(solution, Node):
        path = solution.path()
        print('**************** Solution:')
        if len(path) < 50:
            path.reverse()
            print(path)
        else:
            print('len(path)=', len(path), 'too long, print suppressed')
        p.solution_cost = solution.path_cost
        print()
    else:  # search failed, return a negative value
        p.solution_cost = -1
        print('Search failed')
    return p
Example #2
def cutExpressionElements(tree):
    for node in PreOrderIter(tree):
        if name(node) == 'expressao' and len(node.children) > 0 and name(
                node.children[0]) == 'expressao_logica':
            for n in PreOrderIter(node):
                if name(n) == 'expressao_aditiva':
                    n.parent = node
                    node.children[0].parent = None
Example #3
def _get_name(self, con, current):
    cur = self._dbh.execute(con, "SELECT name FROM domains WHERE id=%s",
                            utils.name(current))
    result = cur.fetchall()
    if len(result) != 1:
        raise utils.NoSuchObject
    return result[0][0]
Example #4
def compare_searchers(problems,
                      header,
                      searchers=[
                          breadth_first_tree_search,
                          breadth_first_graph_search, depth_first_graph_search,
                          iterative_deepening_search, depth_limited_search,
                          astar_search
                      ]):
    def do(searcher, problem):
        if sys.stdin.isatty():
            raw_input('Press Enter to run %s on %s' %
                      (name(searcher), name(problem)))
        else:
            print('Running %s on %s' % (name(searcher), name(problem)))
        p = InstrumentedProblem(problem)
        start = time.time()
        solution = searcher(p)
        elapsed = time.time() - start
        print('Elapsed time:', elapsed, 'seconds')
        if isinstance(solution, Node):
            path = solution.path()
            print('**************** Solution:')
            if len(path) < 50:
                path.reverse()
                print(path)
            else:
                print('len(path)=', len(path), 'too long, print suppressed')
            p.solution_cost = solution.path_cost
            print()
        else:  # search failed, return a negative value
            p.solution_cost = -1
            print('Search failed')
        return p

    # table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
    # print_table(table, header, sep='|')
    results = {(p, s): do(s, p) for p in problems for s in searchers}
    for p in problems:
        print('Problem:', name(p))
        for s in searchers:
            print('Searcher:%30s' % name(s), results[(p, s)])
    print('#exp: number of expanded nodes')
    print('#gt:  number of goal tests')
    print('#st:  number of generated states')


# Exercise: compute and compare penetrance and effective branching factor
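
The exercise comment above can be approached with two standard measures: penetrance P = L/T (solution depth over total nodes generated) and the effective branching factor b*, the root of b* + b*^2 + ... + b*^L = T. Below is a minimal, self-contained sketch, not part of the example's codebase; how L and T are read off the InstrumentedProblem results above is left to the surrounding code.

def penetrance(depth, total_nodes):
    # P = L / T: solution depth divided by total nodes generated during search.
    return depth / total_nodes if total_nodes else 0.0


def effective_branching_factor(depth, total_nodes, tol=1e-6):
    # Solve b* + b*^2 + ... + b*^depth = total_nodes for b* by bisection.
    if depth < 1 or total_nodes <= depth:
        return 1.0
    lo, hi = 1.0, float(total_nodes)
    while hi - lo > tol:
        mid = (lo + hi) / 2
        if sum(mid ** i for i in range(1, depth + 1)) < total_nodes:
            lo = mid
        else:
            hi = mid
    return (lo + hi) / 2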
Example #5
def clearAll(self, con, current):
    cur = self._dbh.execute(
        con, "DELETE FROM records WHERE domain_id=%s AND type!='SOA'",
        utils.name(current))
    if cur.rowcount == 0:
        # no records were deleted. maybe such domain doesn't exist?
        self._check_existance(con, current)
    con.commit()
Example #6
def addRecords(self, con, rs, current):
    domain = self._get_name(con, current)
    if len(rs) < 1:
        return
    zone_id = utils.name(current)
    rgen = (self._unpack_record(r, zone_id, domain) for r in rs)
    self._dbh.execute_many(con, self._add_record_query, rgen)
    con.commit()
Example #7
def addRecord(self, con, record, current):
    domain = self._get_name(con, current)
    zone_id = utils.name(current)
    cur = self._dbh.execute(con, self._add_record_query,
                            *self._unpack_record(record, zone_id, domain))
    if cur.rowcount != 1:
        raise utils.NoSuchObject
    con.commit()
Example #8
def setSOA(self, con, soar, current):
    cur = self._dbh.execute(
        con, "UPDATE records SET content=%s "
        "WHERE type='SOA' and domain_id=%s", utils.soar2str(soar),
        utils.name(current))
    if cur.rowcount != 1:
        raise self._dbh.NoSuchObject
    con.commit()
Example #9
def getSOA(self, con, current):
    cur = self._dbh.execute(
        con, "SELECT content FROM records "
        "WHERE type='SOA' and domain_id=%s", utils.name(current))
    res = cur.fetchall()
    if len(res) != 1:
        raise utils.NoSuchObject
    return I.SOARecord(*utils.str2soar(res[0][0]))
Example #10
def countRecords(self, con, current):
    cur = self._dbh.execute(
        con, "SELECT count(*) FROM records "
        "WHERE domain_id=%s AND type!='SOA'", utils.name(current))
    result = cur.fetchone()[0]
    if result == 0:
        self._check_existance(con, current)
    return long(result)
Example #11
def getRecords(self, con, limit, offset, current):
    cur = self._dbh.execute_limited(
        con, limit, offset, "SELECT name, type, content, ttl, prio "
        "FROM records WHERE domain_id=%s AND type != 'SOA'",
        utils.name(current))
    res = cur.fetchall()
    if len(res) < 1:
        # maybe there is no such zone?
        self._check_existance(con, current)
    return self._pack_records(res)
Example #12
def compare_searchers(problems,
                      header,
                      searchers=[astar_search, depth_first_tree_search]):
    def do(searcher, problem):
        p = InstrumentedProblem(problem)
        searcher(p)
        return p

    table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
    print_table(table, header)
Example #13
def cutRepeatedElements(tree):
    for node in PostOrderIter(tree):
        no = node.parent  # parent of the current node

        if node.parent is not None:
            father = name(node.parent)  # name of the current node's parent

            # Remove a parent and hand its children over to the grandparent
            if name(node) == father:  # current node == parent
                node.parent = no.parent  # my new parent is my grandparent
                if len(no.children) > 0:
                    for item in no.children:
                        item.parent = node
                # if len(no.children) > 0:  # size of my old parent
                #     i = 0
                #     while i < len(no.children):  # my old parent's children
                #         no.children[i].parent = node  ## they move under me
                #         i += 1

                no.parent = None  ## old parent detached
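
The comments above describe collapsing a node whose label repeats its parent's: the node is re-attached to its grandparent and inherits its former parent's remaining children. Below is a small self-contained demonstration with anytree; the local name() helper is a stand-in assumption for the utility the example imports.

from anytree import Node, PostOrderIter, RenderTree

def name(n):
    # assumption: the example's name() helper returns the node's label
    return n.name

root = Node('corpo')
outer = Node('expressao', parent=root)
inner = Node('expressao', parent=outer)       # same label as its parent
Node('expressao_aditiva', parent=outer)

for node in list(PostOrderIter(root)):        # materialize before mutating
    if node.parent is not None and name(node) == name(node.parent):
        old = node.parent
        node.parent = old.parent              # grandparent adopts the node
        for child in list(old.children):      # former siblings move under the node
            child.parent = node
        old.parent = None                     # detach the duplicated parent

print(RenderTree(root))  # corpo -> expressao -> expressao_aditiva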
Example #14
def OSserver():
    user = ""
    password = ""
    ua = "kodi_%s_v%s" % (name().lower(), version())
    if boolsetting('OScustomuser'):
        if setting('OSuser') != '' or setting('OSpassword') != '':
            user = setting('OSuser')
            password = setting('OSpassword')
        else:
            user = "******"
            password = "******"
    return OpenSubtitles.LogIn(user, password, 'en', ua)
Example #15
def compare_searchers(problems, header,
                      searchers=[breadth_first_tree_search,
                                 breadth_first_search, depth_first_graph_search,
                                 iterative_deepening_search,
                                 depth_limited_search,
                                 recursive_best_first_search]):
    def do(searcher, problem):
        p = InstrumentedProblem(problem)
        searcher(p)
        return p
    table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
    print_table(table, header)
Example #16
def dropRecord(self, con, record, current):
    if record.name.endswith('.'):
        record.name = record.name[:-1]
    cur = self._dbh.execute(
        con, "DELETE FROM records "
        "WHERE domain_id=%s AND name=%s AND type=%s AND content=%s",
        utils.name(current), record.name, str(record.type), record.data)
    if cur.rowcount == 0:
        self._check_existance(con, current)
        # no exception raised - zone exists, but no such record
        raise ICore.NotFoundError("No such record.")
    con.commit()
Example #17
def compare_searchers(problems, header,
                      searchers=[breadth_first_tree_search,
                                 breadth_first_search,
                                 depth_first_graph_search,
                                 iterative_deepening_search,
                                 depth_limited_search,
                                 recursive_best_first_search]):
    def do(searcher, problem):
        p = InstrumentedProblem(problem)
        searcher(p)
        return p
    table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
    print_table(table, header)
Example #18
def findRecords(self, con, phrase, limit, current):
    phrase = (phrase.replace('\\', '\\\\').replace('%', '\\%').replace(
        '_', '\\_') + '%')
    cur = self._dbh.execute_limited(
        con, limit, 0,
        "SELECT name, type, content, ttl, prio FROM records "
        "WHERE domain_id=%s AND type!='SOA' AND name LIKE %s",
        utils.name(current), phrase)
    res = cur.fetchall()
    if len(res) < 1:
        # maybe there is no such zone?
        self._check_existance(con, current)
    return self._pack_records(res)
Example #19
def atribution(node, builder):
    elem = None
    for n in PreOrderIter(node):
        pai = n.parent
        if n.is_leaf and ut.name(pai) == 'var' and ut.name(
                pai.parent) == 'atribuicao':
            nome = ut.name(n)

            if nome in locais.keys():
                elem = locais[nome]
            if nome in globais.keys():
                elem = globais[nome]

            resultado = expression(node.children[1], builder)

            if str(resultado.type) not in str(elem.type):
                if str(elem.type) == "i32*":
                    resultado = builder.fptoui(resultado, ir.IntType(32))
                else:
                    resultado = builder.uitofp(resultado, ir.DoubleType())

            builder.store(resultado, elem)
Example #20
def verifyCallFunc(tree):
    content = walkTable()
    funcsTable = []
    funcsTree = []

    for item in content:
        if 'categoria' in item and item['categoria'] == 'funcao':
            funcsTable.append(item['lexema'])
    
    for e in PreOrderIter(tree):
        if name(e) == 'chamada_funcao':
            funcsTree.append(name(e.children[0]))
    
    for func in funcsTree:
        if func not in funcsTable:
            linha = getLine(func)
            showErrors(linha, 'err', func, 10)
            exit(0)
    
    for func in funcsTable:
        if func not in funcsTree and func != 'principal':
            linha = getLine(func)
            showErrors(linha, 'warn', func, 8)
Example #21
def compare_searchers(problems, header,
                      searchers=[breadth_first_tree_search,
                                 breadth_first_search,
                                 depth_first_graph_search,
                                 iterative_deepening_search,
                                 depth_limited_search,
                                 recursive_best_first_search]):
    def do(searcher, problem):
        p = InstrumentedProblem(problem)
        start_time = time.perf_counter_ns()
        searcher(p)
        end_time = time.perf_counter_ns()
        return (p, str((end_time-start_time)/1000000000))
    table = [[name(s)] + [do(s, p) for p in problems] for s in searchers]
    print_table(table, header)
Example #22
def replaceRecord(self, con, oldr, newr, current):
    domain = self._get_name(con, current)
    if oldr.name.endswith('.'):
        oldr.name = oldr.name[:-1]
    args = self._unpack_record(newr, None, domain)[1:]
    args += (utils.name(current), oldr.name, str(oldr.type), oldr.data)
    cur = self._dbh.execute(
        con, "UPDATE records SET "
        "name=%s, type=%s, content=%s, ttl=%s, prio=%s "
        "WHERE domain_id=%s AND name=%s AND type=%s AND content=%s", *args)
    if cur.rowcount != 1:
        # domain already exists, so there is no such record
        raise ICore.NotFoundError("Failed to replace record",
                                  "Record not found")
    con.commit()
Example #23
def walkingTree(tree, builder):
    for node in PreOrderIter(tree):
        node_name = ut.name(node)

        if node_name == 'chamada_funcao':
            callFunc(node, builder)
        if node_name == 'atribuicao':
            atribution(node, builder)
        if node_name == 'se':
            conditional(node, builder)
        if node_name == 'repita':
            loop(node, builder)
        if node_name == 'retorna':
            returnFunc(node, builder)
        if node_name == 'leia':
            printFunc(node, builder)
        if node_name == 'escreva':
            readFunc(node, builder)
Example #24
def callFunc(node, builder):
    nome_func = ut.name(node.children[0])
    lista = []

    for i in PreOrderIter(node.children[1]):
        if i.is_leaf:
            lista.append(expression(i, builder))

    salvaFunc = None

    for f in funcoes:
        if nome_func == f.name:
            salvaFunc = f
    for i, (param1, param2) in enumerate(zip(lista, salvaFunc.args)):
        if str(param1.type) not in str(param2.type):
            if "double" in str(param2.type):
                f = builder.uitofp(param1, ir.DoubleType())
                lista[i] = f
            elif "i32" in str(param2.type):
                f = builder.fptoui(param1, ir.IntType(32))
                lista[i] = f
    return builder.call(salvaFunc, lista)
Example #25
def resume_extracts(text):
    """
    Takes the text format of the resume and returns all the values

    text: text format of the resume
    """
    conts = {}

    # Preprocessing the document
    text = utils.preprocess(text)

    # Extracting name
    conts['name'] = utils.name(text)

    # Extracting Address
    conts['address'] = utils.address(text)

    # Extracting mobile number
    conts['mobile'] = utils.mobile_number(text)

    # Extracting Email
    conts['email'] = utils.email(text)

    # Extracting github
    conts['github'] = utils.github(text)

    # Extracting linkedin
    conts['linkedin'] = utils.linkedin(text)

    # Provides the expertise list match
    conts['expertise'] = utils.expertise_match(text)

    # Provides the resume-to-job-description similarity
    conts['similarity_score'] = utils.jobdes_rsm_similarity(text)

    return conts
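
A minimal usage sketch for the extractor above; it assumes only that the resume has already been converted to plain text, and the file name is illustrative.

# Hypothetical driver: read an already-converted resume and print each field.
with open("resume.txt", encoding="utf-8") as fh:  # illustrative path
    fields = resume_extracts(fh.read())

for key, value in fields.items():
    print(f"{key}: {value}")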
Example #26
            # searchers=searchList
            players=playerList)
    except:
        traceback.print_exc()
        sys.exit(1)

allScores = newScores()
maxScores = {}
for batch in gradeInfo:
    print('Scores for: %s' % batch)
    maxScores[batch] = {}
    for pLabel in gradeInfo[batch]:
        table = []
        maxScores[batch][pLabel] = newScores()
        for searcher in gradeInfo[batch][pLabel]:
            sLabel = utils.name(searcher)
            info = gradeInfo[batch][pLabel][searcher]
            scoreSet = info['score']
            table.append(['%s, %s:' % (sLabel, pLabel), scoreList(scoreSet)])
            for label in scoreSet:
                accumulator = accuMethods[label]
                maxScores[batch][pLabel][label] = accumulator(
                    maxScores[batch][pLabel][label], scoreSet[label])
        if len(table) > 1:
            table.append([
                '%s summary:' % (pLabel),
                scoreList(maxScores[batch][pLabel])
            ])
        print_table(table)
        if len(table) > 1:
            print()
Example #27
def vid_img(opt):
    output_dir = (opt.output_dir + "/" + name(opt.input.content) + "_" +
                  "_".join([name(s) for s in opt.input.style.split(",")]))

    flow_model = flow.get_flow_model(opt)
    frames = load.process_content_video(flow_model, opt)
    content_size = np.array(load.preprocess(frames[0]).size()[-2:])

    style_images_big = load.process_style_images(opt)

    for size_n, (current_size,
                 num_iters) in enumerate(zip(*determine_scaling(opt.param))):
        print("\nCurrent size {}px".format(current_size))
        os.makedirs(output_dir + "/" + str(current_size), exist_ok=True)
        content_scale = current_size / max(*content_size)

        if current_size <= 1024:
            opt.model.gpu = 0
            opt.model.multidevice = False
        else:
            opt.model.gpu = "0,1"
            opt.model.multidevice = True
            opt.param.tv_weight = 0
        net = models.load_model(opt.model, opt.param)
        net.set_style_targets(style_images_big, content_scale * content_size,
                              opt)

        for pass_n in range(opt.param.passes_per_scale):
            init_image = None
            for (prev_frame, this_frame) in zip(frames,
                                                frames[1:] + frames[:1]):
                # TODO add update_style() function to support changing styles per frame
                opt.output = "%s/%s/%s_%s.png" % (output_dir, current_size,
                                                  pass_n + 1, name(this_frame))
                if os.path.isfile(opt.output):
                    print(
                        "Skipping pass: %s, frame: %s. File already exists." %
                        (pass_n + 1, name(this_frame)))
                    continue
                print("Optimizing... size: %s, pass: %s, frame: %s" %
                      (current_size, pass_n + 1, name(this_frame)))

                content_frames = [
                    F.interpolate(load.preprocess(prev_frame),
                                  scale_factor=content_scale,
                                  mode="bilinear",
                                  align_corners=False),
                    F.interpolate(load.preprocess(this_frame),
                                  scale_factor=content_scale,
                                  mode="bilinear",
                                  align_corners=False),
                ]
                content_frames = [
                    match_histogram(frame, style_images_big[0])
                    for frame in content_frames
                ]
                net.set_content_targets(content_frames[1], opt)

                # Initialize the image
                # TODO make sure initialization correct even when continuing half way through video stylization
                if size_n == 0 and pass_n == 0:
                    if opt.input.init == "random":
                        init_image = th.randn(
                            content_frames[1].size()).mul(0.001)
                    elif opt.input.init == "prev_warp":
                        flo_file = "%s/flow/forward_%s_%s.flo" % (
                            output_dir, name(prev_frame), name(this_frame))
                        flow_map = load.flow_warp_map(flo_file)
                        if init_image is None:
                            init_image = content_frames[0]
                        init_image = F.grid_sample(init_image,
                                                   flow_map,
                                                   padding_mode="border")
                    else:
                        init_image = content_frames[1].clone()
                else:
                    if pass_n == 0:
                        # load images from last pass of previous size
                        if init_image is None:
                            ifile = "%s/%s/%s_%s.png" % (
                                output_dir,
                                prev_size,
                                opt.param.passes_per_scale,
                                name(prev_frame),
                            )
                            init_image = load.preprocess(ifile)
                            init_image = F.interpolate(
                                init_image,
                                size=content_frames[0].size()[2:],
                                mode="bilinear",
                                align_corners=False)
                        bfile = "%s/%s/%s_%s.png" % (
                            output_dir,
                            prev_size,
                            opt.param.passes_per_scale,
                            name(this_frame),
                        )
                        blend_image = load.preprocess(bfile)
                        blend_image = F.interpolate(
                            blend_image,
                            size=content_frames[0].size()[2:],
                            mode="bilinear",
                            align_corners=False)
                    else:
                        # load images from previous pass of current size
                        if init_image is None:
                            ifile = "%s/%s/%s_%s.png" % (output_dir,
                                                         current_size, pass_n,
                                                         name(prev_frame))
                            init_image = load.preprocess(ifile)
                        bfile = "%s/%s/%s_%s.png" % (output_dir, current_size,
                                                     pass_n, name(this_frame))
                        blend_image = load.preprocess(bfile)

                    direction = "forward" if pass_n % 2 == 0 else "backward"
                    flo_file = f"{output_dir}/flow/{direction}_{name(prev_frame)}_{name(this_frame)}.flo"
                    flow_map = load.flow_warp_map(flo_file)
                    flow_map = F.interpolate(flow_map.permute(0, 3, 1, 2),
                                             size=init_image.size()[2:],
                                             mode="bilinear").permute(
                                                 0, 2, 3, 1)

                    warp_image = F.grid_sample(init_image,
                                               flow_map,
                                               padding_mode="border")

                    flow_weight_file = f"{output_dir}/flow/{direction}_{name(prev_frame)}_{name(this_frame)}.png"
                    reliable_flow = load.reliable_flow_weighting(
                        flow_weight_file)
                    reliable_flow = F.interpolate(reliable_flow,
                                                  size=init_image.size()[2:],
                                                  mode="bilinear",
                                                  align_corners=False)

                    net.set_temporal_targets(warp_image,
                                             warp_weights=reliable_flow,
                                             opt=opt)

                    blend_init_image = (
                        1 - opt.param.blend_weight
                    ) * blend_image + opt.param.blend_weight * init_image
                    warp_blend_init_image = F.grid_sample(
                        blend_init_image, flow_map, padding_mode="border")
                    init_image = warp_blend_init_image

                output_image = style.optimize(
                    content_frames, style_images_big, init_image,
                    num_iters // opt.param.passes_per_scale, opt)

                init_image = match_histogram(output_image.detach().cpu(),
                                             style_images_big[0])

                disp = load.deprocess(init_image.clone())
                if opt.param.original_colors == 1:
                    disp = load.original_colors(
                        load.deprocess(content_frames[1].clone()), disp)
                disp.save(str(opt.output))

            # clean up / prepare for next pass
            frames = frames[7:] + frames[:7]  # rotate frames
            frames = list(reversed(frames))

        ffmpeg.input(output_dir + "/" + str(current_size) + "/" + str(pass_n) +
                     "_%05d.png").output(
                         "%s/%s_%s.mp4" %
                         (output_dir, name(output_dir), current_size),
                         **opt.ffmpeg).overwrite_output().run()
        prev_size = current_size
        del net
        th.cuda.empty_cache()

    ffmpeg.input("{output_dir}/{current_size}/{pass_n}_%05d.png").output(
        opt.output, **opt.ffmpeg).overwrite_output().run()
Example #28
def expression(node, builder):
    exp = []
    elem1 = None

    for f in PreOrderIter(node):
        if ut.name(f) == 'chamada_funcao':
            return callFunc(f, builder)

    for f in PreOrderIter(node):
        if f.is_leaf:
            exp.append(ut.name(f))

    if len(exp) == 3:
        if exp[1] == "+":
            if isinstance(exp[0], float) or isinstance(exp[2], float):
                return builder.fadd(retFloat(exp[0]),
                                    retFloat(exp[2]),
                                    name="addFloat")
            else:
                return builder.add(retInt(exp[0]),
                                   retInt(exp[2]),
                                   name="addInt")
        if exp[1] == "-":
            return builder.sub(retInt(exp[0]), retInt(exp[2]), name="sub")
        if exp[1] == "*":
            if isinstance(exp[0], float):
                return builder.fmul(retFloat(exp[0]),
                                    retFloat(exp[2]),
                                    name="mulFloat")
            else:
                return builder.mul(retInt(exp[0]),
                                   retInt(exp[2]),
                                   name="mulInt")
        if exp[1] == "/":
            if isinstance(exp[0], float):
                return builder.fdiv(retFloat(exp[0]),
                                    retFloat(exp[2]),
                                    name="divFloat")
            else:
                return builder.udiv(retInt(exp[0]),
                                    retInt(exp[2]),
                                    name="divInt")
        if exp[1] in [">", "<", ">=", "<="]:
            return builder.icmp_signed(exp[1],
                                       retInt(exp[0]),
                                       retInt(exp[2]),
                                       name="maiorMenor")
        if exp[1] == "<>":
            return builder.icmp_signed("!=",
                                       retInt(exp[0]),
                                       retInt(exp[2]),
                                       name="dif")
        if exp[1] == "=":
            return builder.icmp_signed("==",
                                       retInt(exp[0]),
                                       retInt(exp[2]),
                                       name="igual")
    elif len(exp) == 1:
        if not isnumber(exp[0]) and '.' not in exp[0]:
            if exp[0] in locais.keys():
                elem1 = locais[exp[0]]
            if exp[0] in globais.keys():
                elem1 = globais[exp[0]]
            print(exp)
            return builder.load(elem1)
        else:
            if isnumber(exp[0]):
                return retInt(int(exp[0]))
            else:
                return retFloat(float(exp[0]))
Example #29
def vid_img(args):
    output_dir = args.output_dir + "/" + name(args.content) + "_" + "_".join(
        [name(s) for s in args.style])

    flow_model = flow.get_flow_model(args)
    frames = load.process_content_video(flow_model, args)
    content_size = np.array(load.preprocess(frames[0]).size()[-2:])

    style_images_big = load.process_style_images(args)

    for size_n, (current_size,
                 num_iters) in enumerate(zip(args.image_sizes,
                                             args.num_iters)):

        if len(
                glob.glob("%s/%s/*.png" % (output_dir, args.image_sizes[min(
                    len(args.image_sizes) - 1, size_n + 1)]))) == len(frames):
            print("Skipping size: %s, already done." % current_size)
            prev_size = current_size
            continue

        print("\nCurrent size {}px".format(current_size))
        os.makedirs(output_dir + "/" + str(current_size), exist_ok=True)
        content_scale = current_size / max(*content_size)

        # scale style images
        style_images = []
        content_area = content_scale**2 * content_size[0] * content_size[1]
        for img in style_images_big:
            style_scale = math.sqrt(
                content_area / (img.size(3) * img.size(2))) * args.style_scale
            style_images.append(
                F.interpolate(th.clone(img),
                              scale_factor=style_scale,
                              mode="bilinear",
                              align_corners=False))

        optim.set_model_args(args, current_size)
        net, losses = models.load_model(args)
        # optim.set_style_targets(net, style_images, args)

        for pass_n in range(args.passes_per_scale):
            pastiche = None

            if args.loop:
                start_idx = random.randrange(0, len(frames) - 1)
                frames = frames[
                    start_idx:] + frames[:start_idx]  # rotate frames

            if len(
                    glob.glob("%s/%s/%s_*.png" % (output_dir, current_size,
                                                  pass_n + 2))) == len(frames):
                print(f"Skipping pass: {pass_n + 1}, already done.")
                frames = list(reversed(frames))
                continue

            for n, (prev_frame, this_frame) in enumerate(
                    zip(frames + frames[:11 if args.loop else 1],
                        frames[1:] + frames[:10 if args.loop else 1])):
                # TODO add update_style() function to support changing styles per frame

                args.output = "%s/%s/%s_%s.png" % (
                    output_dir, current_size, pass_n + 1, name(this_frame))
                if os.path.isfile(args.output) and not n >= len(frames):
                    print(
                        "Skipping pass: %s, frame: %s. File already exists." %
                        (pass_n + 1, name(this_frame)))
                    continue

                print("Optimizing... size: %s, pass: %s, frame: %s" %
                      (current_size, pass_n + 1, name(this_frame)))

                content_frames = [
                    F.interpolate(load.preprocess(prev_frame),
                                  scale_factor=content_scale,
                                  mode="bilinear",
                                  align_corners=False),
                    F.interpolate(load.preprocess(this_frame),
                                  scale_factor=content_scale,
                                  mode="bilinear",
                                  align_corners=False),
                ]
                content_frames = [
                    match_histogram(frame,
                                    style_images_big[0],
                                    mode=args.match_histograms)
                    for frame in content_frames
                ]
                flow_direction = "forward" if pass_n % 2 == 0 else "backward"
                # optim.set_content_targets(net, content_frames[1], args)

                # Initialize the image
                # TODO make sure initialization correct even when continuing half way through video stylization
                if size_n == 0 and pass_n == 0:
                    if args.init == "random":
                        pastiche = th.randn(
                            content_frames[1].size()).mul(0.001)
                    elif args.init == "prev_warp":
                        if pastiche is None:
                            pastiche = content_frames[0]
                        flo_file = f"{output_dir}/flow/{flow_direction}_{name(prev_frame)}_{name(this_frame)}.flo"
                        flow_map = load.flow_warp_map(flo_file,
                                                      pastiche.shape[2:])
                        pastiche = F.grid_sample(pastiche,
                                                 flow_map,
                                                 padding_mode="border")
                    else:
                        pastiche = content_frames[1].clone()
                else:
                    if pass_n == 0:
                        # load images from last pass of previous size
                        if pastiche is None:
                            ifile = "%s/%s/%s_%s.png" % (
                                output_dir,
                                prev_size
                                if n <= len(frames) else current_size,
                                args.passes_per_scale
                                if n <= len(frames) else pass_n + 1,
                                name(prev_frame),
                            )
                            pastiche = load.preprocess(ifile)
                            pastiche = F.interpolate(
                                pastiche,
                                size=content_frames[0].size()[2:],
                                mode="bilinear",
                                align_corners=False)
                        bfile = "%s/%s/%s_%s.png" % (
                            output_dir,
                            prev_size if n <= len(frames) else current_size,
                            args.passes_per_scale
                            if n <= len(frames) else pass_n + 1,
                            name(this_frame),
                        )
                        blend_image = load.preprocess(bfile)
                        blend_image = F.interpolate(
                            blend_image,
                            size=content_frames[0].size()[2:],
                            mode="bilinear",
                            align_corners=False)
                    else:
                        # load images from previous pass of current size
                        if pastiche is None:
                            ifile = "%s/%s/%s_%s.png" % (
                                output_dir,
                                current_size,
                                pass_n if n <= len(frames) else pass_n + 1,
                                name(prev_frame),
                            )
                            pastiche = load.preprocess(ifile)
                        bfile = "%s/%s/%s_%s.png" % (
                            output_dir,
                            current_size,
                            pass_n if n <= len(frames) else pass_n + 1,
                            name(this_frame),
                        )
                        blend_image = load.preprocess(bfile)

                    flo_file = f"{output_dir}/flow/{flow_direction}_{name(prev_frame)}_{name(this_frame)}.flo"
                    flow_map = load.flow_warp_map(flo_file, pastiche.shape[2:])

                    warp_image = F.grid_sample(pastiche,
                                               flow_map,
                                               padding_mode="border")

                    flow_weight_file = f"{output_dir}/flow/{flow_direction}_{name(prev_frame)}_{name(this_frame)}.png"
                    reliable_flow = load.reliable_flow_weighting(
                        flow_weight_file)
                    reliable_flow = F.interpolate(reliable_flow,
                                                  size=pastiche.size()[2:],
                                                  mode="bilinear",
                                                  align_corners=False)

                    optim.set_temporal_targets(net,
                                               warp_image,
                                               warp_weights=reliable_flow,
                                               args=args)

                    pastiche = (
                        1 - args.temporal_blend
                    ) * blend_image + args.temporal_blend * pastiche

                output_image = optim.optimize(
                    content_frames[1], style_images, pastiche,
                    num_iters // args.passes_per_scale, args, net, losses)

                pastiche = match_histogram(output_image.detach().cpu(),
                                           style_images_big[0],
                                           mode=args.match_histograms)

                disp = load.deprocess(pastiche.clone())
                if args.original_colors == 1:
                    disp = load.original_colors(
                        load.deprocess(content_frames[1].clone()), disp)
                disp.save(str(args.output))

            # reverse frames for next pass
            frames = list(reversed(frames))

        ffmpeg.input(output_dir + "/" + str(current_size) + "/" + str(pass_n) +
                     "_%05d.png").output(
                         "%s/%s_%s.mp4" %
                         (output_dir, name(output_dir), current_size),
                         **args.ffmpeg).overwrite_output().run()
        prev_size = current_size
        del net
        th.cuda.empty_cache()
Example #30
def cutUselessElements(tree):
    for node in PostOrderIter(tree):
        if name(node) in (':', ',', '(', ')', '[', ']', ':='):
            node.parent = None
Example #31
import torch
import numpy as np
from glob import glob

import config
import flow
import load
from utils import match_histogram, name

torch.backends.cudnn.benchmark = True

args = config.get_args()

if args.seed >= 0:
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    if args.backend == "cudnn":
        torch.backends.cudnn.deterministic = True

output_dir = args.output_dir + "/" + name(args.content) + "_" + "_".join(
    [name(s) for s in args.style])

flow_model = flow.get_flow_model(args)
frames = load.process_content_video(flow_model, args)
content_size = np.array(load.preprocess(frames[0]).size()[-2:])

style_images_big = load.process_style_images(args)

for size_n, (current_size,
             num_iters) in enumerate(zip(args.image_sizes, args.num_iters)):

    if len(
            glob("%s/%s/*.png" % (output_dir, args.image_sizes[min(
                len(args.image_sizes) - 1, size_n + 1)]))) == len(frames):
        print("Skipping size: %s, already done." % current_size)