Example #1
def main():
    filename = './origin_data/bugreports.xml'
    path = './bug_reports'
    bugslist = utils.read_xml(filename)
    # print(bugslist)
    label = utils.read_label('./origin_data/goldset.txt')
    # print(label)
    samples, ids = utils.get_content(bugslist)
    # print(samples)
    num_word_list, numword = utils.count_word(samples)
    # print(len(num_word_list))

    # for i in num_word_list:
    #     num_sentence.append(len(i))
    utils.savefile(samples)
    # print(num_sentence)
    results = textrank.bugsum(path, numword, num_word_list)
    print([len(i) for i in results])  # materialize as a list; a bare generator expression would only print its repr
    # extra_ids = index2id(results,ids)
    # print(len(extra_ids))
    pred = eval.index2pred(results, ids)
    y = eval.label2y(label, ids)
    mean_acc, mean_pr, mean_re, mean_f1 = eval.evaluate(y, pred)
    print('mean_acc, mean_pr, mean_re, mean_f1', mean_acc, mean_pr, mean_re,
          mean_f1)
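The utils.savefile call above is not defined in this snippet. Below is a minimal sketch of what such a helper could look like, assuming `samples` is a list of per-report sentence lists and that textrank.bugsum later reads plain-text files from the './bug_reports' directory (both are assumptions, not shown in the source):

import os

def savefile(samples, path='./bug_reports'):
    """Hypothetical helper: write each report's sentences to its own text file, one sentence per line."""
    os.makedirs(path, exist_ok=True)  # assumed output directory, matching `path` in main()
    for idx, sentences in enumerate(samples):
        out = os.path.join(path, '%d.txt' % idx)
        with open(out, 'w', encoding='utf-8') as f:
            f.write('\n'.join(sentences))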
Example #2
def canais():
    librtmpwindow()
    info_servidores()

    nrcanais = 62
    canaison = []
    empty = 'nada'
    #GA("None","listacanais")
    if selfAddon.getSetting("prog-lista3") == "true":
        mensagemprogresso.create('TV Portuguesa',
                                 'A carregar listas de programação.',
                                 'Por favor aguarde.')
        mensagemprogresso.update(0)
        if mensagemprogresso.iscanceled(): sys.exit(0)
        programas = p_todos()
        mensagemprogresso.close()
    else:
        programas = []

    sintomecomsorte()

    if activado == True:
        addCanal("[B]Lista Completa[/B]", empty, 16,
                 tvporpath + art + 'gravador-ver1.png', nrcanais, '')
    addDir("[B][COLOR white]Informações[/COLOR][/B]", 'nada', 1,
           tvporpath + art + 'defs-ver2.png', 1,
           'Clique aqui para voltar ao menu principal.', True)
    if selfAddon.getSetting("listas-pessoais") == "true":
        addDir("[B][COLOR white]Listas Pessoais[/COLOR][/B]", 'nada', 6,
               tvporpath + art + 'listas-ver2.png', 1,
               'Outras listas de canais criadas pela comunidade.', True)

    if selfAddon.getSetting("radios") == "true":
        addDir("[B][COLOR white]Radios[/COLOR][/B]", 'nada', 19,
               tvporpath + art + 'radios-v1.png', 1,
               'Oiça comodamente radios nacionais.', True)
    if selfAddon.getSetting("eventos") == "true":
        canaison.append('[B][COLOR white]Eventos[/COLOR][/B]')
        changeview()
    if selfAddon.getSetting("praias") == "true":
        addDir("[B][COLOR white]Praias[/COLOR][/B]", 'nada', 26,
               tvporpath + art + 'versao-ver2.png', 1,
               'Webcams das melhores praias nacionais.', True)

    setupCanais(canaison, empty, nrcanais, programas)

    try:
        canaison = ''.join(canaison)
        savefile('canaison', canaison)
    except:
        pass

    vista_canais()
    xbmcplugin.setContent(int(sys.argv[1]), 'livetv')
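Here savefile('canaison', canaison) presumably persists the joined channel list under that name. A minimal sketch of such a helper, assuming it writes plain text into the add-on's profile folder (the location and the use of xbmc.translatePath are assumptions, not shown in the source):

import os
import xbmc
import xbmcaddon

selfAddon = xbmcaddon.Addon()  # same add-on handle the example already uses

def savefile(name, data):
    """Hypothetical helper: store text data under `name` in the add-on profile directory."""
    folder = xbmc.translatePath(selfAddon.getAddonInfo('profile'))  # assumed location
    if not os.path.exists(folder):
        os.makedirs(folder)
    with open(os.path.join(folder, name), 'w') as f:
        f.write(data)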
Example #3
def signal_handler(signal, frame):
    """
    Capture Ctrl+C
    """
    global crawler_state
    global interrupted
    
    print "Saving state Ctrl+C"
    interrupted = True
    
    #Serialize
    dumped = pickle.dumps(crawler_state)
    savefile('state.data', dumped)
    sys.exit(0)
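Examples 3 and 4 rely on savefile/loadfile helpers that are not shown. A minimal sketch of the saving side, assuming it simply writes the pickled data straight to disk (the signature is inferred from the call above, not confirmed by the source):

def savefile(filename, data, mode='wb'):
    """Hypothetical helper: persist raw (e.g. pickled) data to disk."""
    with open(filename, mode) as f:
        f.write(data)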
Example #4
def main():

    # Arguments logic
    parser = argparse.ArgumentParser(description='Web crawler')
    parser.add_argument('website', nargs='?', action='store', help='website to crawl')
    parser.add_argument('-l', action='store', default=2, help='maximum depth level to crawl')
    parser.add_argument('-resume', action='store_const', const=32, help='resume crawler')

    global crawler_state
    
    # Arguments parser
    args = parser.parse_args()
    
    # Crawler
    c = Crawler(args.website, int(args.l))
    crawler_state = c
    
    # Register Ctrl+C
    signal.signal(signal.SIGINT, signal_handler)
    
    if not args.resume:
        info = c.crawl(args.website)
    else:
        #recover state
        print "recovering..."
        raw = loadfile('state.data')
        data = pickle.loads(raw)
        c.output = data.output
        
        c.queue = data.queue        
        c.queue.append((data.current_url, data.level))
        
        c.visited = data.visited
        c.url = data.url
        c.maxdepth = data.maxdepth
        info = c.crawl(data.url)
        print "OK"
        
    
    # Save data
    savefile('structure.txt', info)
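The -resume branch needs the matching loadfile helper. A sketch consistent with the call above, again an inference rather than part of the source:

def loadfile(filename, mode='rb'):
    """Hypothetical counterpart to savefile: read previously saved raw data back from disk."""
    with open(filename, mode) as f:
        return f.read()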
Example #6
def train_er_classifier(train_loader, test_loader, encoder, discriminator,
                        classifier, use_cuda, n_z, sigma, num_epoch, lr,
                        LAMBDA, LAMBDA0, LAMBDA1, file_name, epsilon, k, a,
                        delay, print_every, dataset, attack_range):
    criterion2 = nn.CrossEntropyLoss()
    adversary = LinfPGDAttackOT(epsilon=epsilon, k=k, a=a, data=attack_range)

    encoder.train()
    discriminator.train()
    classifier.train()
    # Optimizers
    enc_optim = optim.Adam(encoder.parameters(), lr=lr)
    dis_optim = optim.Adam(discriminator.parameters(), lr=0.5 * lr)
    cla_optim = optim.Adam(classifier.parameters(), lr=0.05 * lr)

    enc_scheduler = StepLR(enc_optim, step_size=30, gamma=0.5)
    dis_scheduler = StepLR(dis_optim, step_size=30, gamma=0.5)
    cla_scheduler = StepLR(cla_optim, step_size=30, gamma=0.5)

    if use_cuda:
        encoder, discriminator, classifier = encoder.cuda(
        ), discriminator.cuda(), classifier.cuda()

    one = torch.Tensor([1])
    mone = one * -1

    if use_cuda:
        one = one.cuda()
        mone = mone.cuda()
    for epoch in range(num_epoch):
        step = 0

        for images, labels in tqdm(train_loader):

            if use_cuda:
                images, labels = images.cuda(), labels.cuda()

            # ======== Training ======== #

            batch_size = images.size()[0]

            encoder.zero_grad()
            discriminator.zero_grad()
            classifier.zero_grad()

            # ======== Get Adversarial images ======== #
            if epoch >= delay:
                target_pred = pred_batch_ot(images, classifier, encoder)
                images_adv = adv_train_ot(images, target_pred, classifier,
                                          encoder, adversary)
                images_adv = to_var(images_adv)
            # ======== Train Discriminator ======== #
            frozen_params(encoder)
            frozen_params(classifier)
            free_params(discriminator)

            z_fake = torch.randn(batch_size, n_z) * sigma

            if use_cuda:
                z_fake = z_fake.cuda()

            d_fake = discriminator(to_var(z_fake))
            z_real = encoder(images)
            d_real = discriminator(to_var(z_real))

            disc_fake = LAMBDA * d_fake.mean()
            disc_real = LAMBDA * d_real.mean()

            disc_fake.backward(one)
            disc_real.backward(mone)
            diss_loss = disc_fake - disc_real

            dis_optim.step()

            clip_params(discriminator)

            if epoch >= delay:
                z_fake = torch.randn(batch_size, n_z) * sigma

                if use_cuda:
                    z_fake = z_fake.cuda()

                d_fake = discriminator(to_var(z_fake))
                z_real = encoder(images_adv)
                d_real = discriminator(to_var(z_real))

                disc_fake = LAMBDA * d_fake.mean()
                disc_real = LAMBDA * d_real.mean()

                disc_fake.backward(one)
                disc_real.backward(mone)
                diss_loss = disc_fake - disc_real

                dis_optim.step()

                clip_params(discriminator)
            # ======== Train Classifier and Encoder======== #
            free_params(encoder)
            free_params(classifier)
            frozen_params(discriminator)

            pred_labels = classifier(encoder(to_var(images)))
            class_loss = LAMBDA0 * criterion2(pred_labels, labels)

            if epoch >= delay:
                pred_labels_adv = classifier(encoder(to_var(images_adv)))
                class_loss_adv = LAMBDA0 * criterion2(pred_labels_adv, labels)
                class_loss = (class_loss + class_loss_adv) / 2

            class_loss.backward()

            cla_optim.step()
            enc_optim.step()

            # ======== Train Encoder ======== #
            free_params(encoder)
            frozen_params(classifier)
            frozen_params(discriminator)

            z_real = encoder(images)
            d_real = discriminator(encoder(Variable(images.data)))

            d_loss = LAMBDA1 * (d_real.mean())

            d_loss.backward(one)

            enc_optim.step()

            if epoch >= delay:
                z_real = encoder(images_adv)
                d_real = discriminator(encoder(Variable(images_adv.data)))

                d_loss = LAMBDA1 * (d_real.mean())
                d_loss.backward(one)

                enc_optim.step()

            step += 1

            if (step + 1) % print_every == 0:
                print(
                    "Epoch: [%d/%d], Step: [%d/%d], Discriminative Loss: %.4f, Classification_Loss:%.4f"
                    % (epoch + 1, num_epoch, step + 1, len(train_loader),
                       diss_loss.data.item(), class_loss.data.item()))

        if (epoch + 1) % 1 == 0:
            savefile(file_name,
                     encoder,
                     discriminator,
                     classifier,
                     dataset=dataset)
            test(test_loader, classifier, encoder=encoder, use_cuda=True)

    savefile(file_name, encoder, discriminator, classifier, dataset=dataset)
    return classifier, encoder
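In this example savefile acts as a checkpointing routine for the three networks. A minimal sketch of such a helper, assuming it stores the state_dicts in a single file whose path is derived from file_name and dataset (the directory layout is an assumption, not given by the source):

import os
import torch

def savefile(file_name, encoder, discriminator, classifier, dataset='mnist'):
    """Hypothetical checkpoint helper: save encoder/discriminator/classifier weights together."""
    ckpt_dir = os.path.join('checkpoints', dataset)  # assumed directory layout
    os.makedirs(ckpt_dir, exist_ok=True)
    torch.save(
        {
            'encoder': encoder.state_dict(),
            'discriminator': discriminator.state_dict(),
            'classifier': classifier.state_dict(),
        },
        os.path.join(ckpt_dir, file_name + '.pth'),
    )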