Example #1
def main(args):
    use_cuda = torch.cuda.is_available()
    print('==> Loading data..')
    train_loader, test_loader = loaddata(args)

    print('==> Loading model..')
    encoder, discriminator, classifier = loadmodel(args)

    print('==> Training starts..')
    torch.manual_seed(123)
    classifier, encoder = train_er_classifier(train_loader,
                                              test_loader,
                                              encoder,
                                              discriminator,
                                              classifier,
                                              use_cuda=use_cuda,
                                              n_z=args['n_z'],
                                              sigma=args['sigma'],
                                              num_epoch=args['epochs'],
                                              lr=args['lr'],
                                              LAMBDA=args['LAMBDA'],
                                              LAMBDA0=args['LAMBDA0'],
                                              LAMBDA1=args['LAMBDA1'],
                                              delay=args['delay'],
                                              file_name=args['file_name'],
                                              epsilon=args['epsilon'],
                                              k=args['k'],
                                              a=args['a'],
                                              print_every=args['print_every'],
                                              dataset=args['dataset'],
                                              attack_range=args['range'])

    test(test_loader, classifier, encoder=encoder, use_cuda=use_cuda)
    print('==> Testing the model against PGD attack..')
    testattack(classifier,
               encoder,
               test_loader,
               epsilon=args['epsilon'],
               k=args['k'],
               a=args['a'],
               dataset=args['dataset'],
               use_cuda=use_cuda)
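For orientation, here is a minimal sketch of the args dictionary this main() expects; the keys come from the calls above, while every value (and each comment guessing its meaning) is a placeholder chosen for illustration.

args = {
    'n_z': 128,             # latent dimension of the encoder (guess)
    'sigma': 1.0,
    'epochs': 100,
    'lr': 1e-3,
    'LAMBDA': 10.0,
    'LAMBDA0': 1.0,
    'LAMBDA1': 1.0,
    'delay': 0,
    'file_name': 'er_classifier.pt',
    'epsilon': 8 / 255,     # PGD perturbation budget (guess)
    'k': 10,                # PGD steps (guess)
    'a': 2 / 255,           # PGD step size (guess)
    'print_every': 100,
    'dataset': 'cifar10',
    'range': (0.0, 1.0),    # passed as attack_range
}
main(args)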
Example #2
    args.epsilon_decay_factor = 0.99
    args.lr = 0.001
    args.gamma = 0.90

    policy = DQNPolicy(make_dqn(statesize, actionsize),
                       statesize,
                       actionsize,
                       lr=args.lr,
                       gamma=args.gamma)
    utils.qlearn(env, policy, args)
    torch.save(policy.model, args.model)

    # From here, take from mp7.py
    # Environment (a Markov Decision Process model)
    # Q Model

    model = utils.loadmodel(args.model, env, statesize, actionsize)
    print("Model: {}".format(model))

    # Rollout
    _, rewards = utils.rollout(env,
                               model,
                               args.episodes,
                               args.epsilon,
                               render=True)

    # Report
    # Evaluate total rewards for the MountainCar environment
    score = (np.array(rewards) > -200.0).sum()
    print('Score: ' + str(score) + '/' + str(args.episodes))
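The snippet saves the full policy network with torch.save(policy.model, args.model) and reads it back through utils.loadmodel; under that assumption, the helper could be as simple as the following sketch (the real utils module may differ):

import torch

def loadmodel(path, env, statesize, actionsize):
    # The checkpoint holds a complete nn.Module, so torch.load restores it directly;
    # env, statesize and actionsize only mirror the call site and would matter
    # if the network had to be rebuilt before loading a state_dict instead.
    model = torch.load(path)
    model.eval()
    return model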
Example #3
import os
import time
import numpy as np
from PIL import Image, ImageFont
import utils

time_start = time.time()
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"

#%%
inputdir = os.path.join('.', 'TestData')
outputdir = os.path.join('.', 'TestResults')
utils.checkpath(outputdir)
filenames = utils.all_files_under(inputdir)


#%% load model
modelfile = os.path.join('.', 'Pretrained', 'Model.json')
weightsfile = os.path.join('.', 'Pretrained', 'Weights.h5')
model = utils.loadmodel(modelfile, weightsfile)

all_num = len(filenames)
pre_num = 0
acc_num = 0
setFont = ImageFont.truetype('C:/windows/fonts/Arial.ttf', 60)
fillColor = "#000000"
REFSTR = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'

for itr in range(all_num):
    filename = filenames[itr]
    img_rgb = np.array(Image.open(filename))
    img = 255 - np.array(Image.open(filename).convert('L'))  # convert to grayscale
    cnt,image = utils.splitimage(img)
    if (cnt == 8):
        pre_num += 1
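Model.json plus Weights.h5 is the usual Keras architecture/weights split, so utils.loadmodel here plausibly wraps model_from_json and load_weights; this is an assumption about the helper, not its confirmed source:

from tensorflow.keras.models import model_from_json

def loadmodel(modelfile, weightsfile):
    # Rebuild the architecture from the JSON description, then attach the weights.
    with open(modelfile) as f:
        model = model_from_json(f.read())
    model.load_weights(weightsfile)
    return model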
Example #4
        min_loss = float('Inf')
        for epoch in range(1, n_epochs + 1):
            model.train()
            tr_loss, tr_acc = fit(model, train_dl, criteria, optimizer, device)
            model.eval()
            val_loss, val_acc = valid(model, valid_dl, criteria, optimizer,
                                      device)
            test_loss, test_acc = valid(model, test_dl, criteria, optimizer,
                                        device)

            log_value('Loss/train', tr_loss, epoch)
            log_value('Accuracy/train', tr_acc, epoch)
            log_value('Loss/valid', val_loss, epoch)
            log_value('Accuracy/valid', val_acc, epoch)
            log_value('Loss/test', test_loss, epoch)
            log_value('Accuracy/test', test_acc, epoch)
            if val_loss < min_loss:
                savemodel(model, dir='siamese', device=device)
                min_loss = val_loss
            if epoch % id == 0:
                print(
                    "epoch {} tr_loss {:.3f} acc {:.3f} valid_loss {:.3f} acc {:.3f} test_loss {:.3f} acc {:.3f}"
                    .format(epoch, tr_loss, tr_acc, val_loss, val_acc,
                            test_loss, test_acc))
    else:
        model = loadmodel(dir='siamese_lstm')
        print("loaded model\n evaluating....")
        test_loss, test_acc = valid(model, test_dl, criteria, optimizer,
                                    device)
        print("test_loss {:.3f} acc {:.3f}".format(test_loss, test_acc))
Example #5
#coding:utf-8
portList = (8889, )  # port this server listens on

import tornado.ioloop
import tornado.web
import numpy as np
from time import sleep
#import shutil
#import os
from random import random
from io import BytesIO
from PIL import Image
from base64 import b64decode
import utils
model = utils.loadmodel('Model.json', 'Weights.h5')
REFSTR = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ'


def decaptcha(img):
    try:
        #upload_path=os.path.join(os.path.dirname(__file__),'files')  # temporary storage path for uploaded files
        #file_metas=self.request.files['file']    # extract the file metadata whose form field name is 'file'
        #print(file_metas)
        #print(self)
        #img0 = b64decode(self.get_argument('file'))
        #img = Image.open(BytesIO(img0))
        img = Image.open(BytesIO(img))
        img = 255 - np.array(img.convert('L'))  # convert to grayscale
        cnt, img = utils.splitimage(img)
        img = np.expand_dims(img, axis=-1)  # at this point the result is still an image
        img = model.predict(img)
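The example is cut off right after model.predict. Assuming the network returns one row of class probabilities per split character, the output could be mapped back to text with REFSTR roughly like this (an illustrative sketch, not the original continuation):

indices = np.argmax(img, axis=-1)              # img now holds per-character class probabilities
captcha = ''.join(REFSTR[i] for i in indices)  # class index -> character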
Example #6
        print("Training....")
        for epoch in range(1, n_epochs + 1):
            model.train()
            tr_loss, tr_acc = fit(model, train_dl, criteria, optimizer, device)
            model.eval()
            val_loss, val_acc = valid(model, valid_dl, criteria, optimizer,
                                      device)
            test_loss, test_acc = valid(model, test_dl, criteria, optimizer,
                                        device)

            log_value('Loss/train', tr_loss, epoch)
            log_value('Accuracy/train', tr_acc, epoch)
            log_value('Loss/valid', val_loss, epoch)
            log_value('Accuracy/valid', val_acc, epoch)
            log_value('Loss/test', test_loss, epoch)
            log_value('Accuracy/test', test_acc, epoch)
            if val_loss < min_loss:
                savemodel(model, dir='siamese')
                min_loss = val_loss
            if epoch % id == 0:
                print(
                    "tr_loss {:.3f} acc {:.3f} valid_loss {:.3f} acc {:.3f} test_loss {:.3f} acc {:.3f}"
                    .format(tr_loss, tr_acc, val_loss, val_acc, test_loss,
                            test_acc))
    else:
        model = loadmodel(dir='siamese_base')
        print("loaded model\n evaluating....")
        test_loss, test_acc = valid(model, test_dl, criteria, optimizer,
                                    device)
        print("test_loss {:.3f} acc {:.3f}".format(test_loss, test_acc))
Example #7
def use_system2(sow,labmbda,n_results):


    # ===================== read the model =================

    master_phrases_vectors = use.loadmodel('master_phrases_vectors_2')
    tfidftransformer_1 = use.loadmodel('tfidftransformer_1_2')
    tfidftransformer_2 = use.loadmodel('tfidftransformer_2_2')
    master_nerank_vectors=use.loadmodel('master_nerank_vectors_2')

    vocab_map_2 = {v: k for k, v in tfidftransformer_2.vocabulary_.iteritems()}
    vocab_map_1 = {v: k for k, v in tfidftransformer_1.vocabulary_.iteritems()}

    # ===================== read sow =======================

    sow = sow#.decode('utf-8')


    # ===================== vectorize the SOW =======================

    sow_tf1=tfidftransformer_1.transform([sow])[0]
    sow_tf2=tfidftransformer_2.transform([sow])[0]

    if len(sow_tf1.indices) == 0 or len(sow_tf2.indices) == 0:
        print 'sow is not large enough for this system. Please, try System 1'
        return [],[]

    sow_final_vec=ne_rank(sow_tf1,sow_tf2,tfidftransformer_1,vocab_map_1,vocab_map_2)

    phrases=use.noun_tokenize(sow)
    phrases=list(set(phrases))
    phrases_vectors=[list(tfidftransformer_1.transform([x])[0].indices) for x in phrases]
    sow_phrase_dict = {}
    for x, phrase in zip(phrases_vectors, phrases):
        x = [sow_final_vec[y] for y in x if y in sow_final_vec.keys()]
        avg = np.sum(x)
        sow_phrase_dict[phrase] = avg


    # ===================== find cosine similarities =======================

    similarities=[]
    all_important_terms_tf=[]
    all_important_terms_keywords=[]
    for nerank_vec,phrase_dict in zip(master_nerank_vectors,master_phrases_vectors):
        sim_nerank,product_tf=use.get_cosine(nerank_vec,sow_final_vec)
        keys = product_tf.keys()
        values = product_tf.values()
        important_terms_tf = list(reversed(np.argsort(values)))
        important_terms_tf = [vocab_map_1[keys[x]] for x in important_terms_tf]
        all_important_terms_tf.append(important_terms_tf)

        sim_keyword,product_keyword=use.get_cosine(phrase_dict,sow_phrase_dict)
        keys = product_keyword.keys()
        values = product_keyword.values()
        important_terms_keyword = list(reversed(np.argsort(values)))
        important_terms_keyword = [keys[x] for x in important_terms_keyword]
        all_important_terms_keywords.append(important_terms_keyword)

        sim=(labmbda*sim_nerank)+((1-labmbda)*sim_keyword)
        similarities.append(sim)


    # ===================== rank the documents and print the top n =======================

    ranked_docs=list(reversed(np.argsort(similarities)))

    results_sim = []
    results_index = []
    for i in range(n_results):
        index=ranked_docs[i]
        # print similarities[index]
        results_sim.append(format(100*similarities[index],'.2f'))
        results_index.append(index)
        # print all_important_terms_tf[index]
        # print all_important_terms_keywords[index]

    return results_index, results_sim
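use_system2 (and use_system1 below) pulls its fitted artifacts through use.loadmodel; one plausible implementation is a thin pickle wrapper with a matching savemodel (an assumption about the use module and the '.pkl' naming, not its confirmed contents):

import pickle

def savemodel(obj, name):
    with open(name + '.pkl', 'wb') as f:
        pickle.dump(obj, f)

def loadmodel(name):
    # Fitted vectorizers and the precomputed phrase/nerank vectors are read back by name.
    with open(name + '.pkl', 'rb') as f:
        return pickle.load(f)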
Example #8
def use_system1(sow,labmbda,n_results):


    # ===================== read the model =================

    master_phrases_vectors=use.loadmodel('master_phrases_vectors_1')
    texts_all_tf=use.loadmodel('texts_all_tf_1')
    tfidftransformer=use.loadmodel('tfidftransformer_1')

    vocab_map = {v: k for k, v in tfidftransformer.vocabulary_.iteritems()}

    # ===================== read sow =======================

    sow=sow#.decode('utf-8')

    # ===================== vectorize the SOW =======================

    sow_tf=tfidftransformer.transform([sow])[0]
    sow_tf=sow_tf.todense()
    phrases=use.noun_tokenize(sow)
    phrases=list(set(phrases))
    phrases_vectors=[list(tfidftransformer.transform([x])[0].indices) for x in phrases]
    sow_phrase_dict = {}
    for x, phrase in zip(phrases_vectors, phrases):
        x = np.array(sow_tf).flatten()[x]
        avg = np.mean(x)
        sow_phrase_dict[phrase] = avg

    # ===================== find cosine similarities =======================

    similarities=[]
    all_important_terms_tf=[]
    all_important_terms_keywords=[]
    for text_tf,phrase_dict in zip(texts_all_tf,master_phrases_vectors):
        sim_tf=cosine_similarity(text_tf,sow_tf)
        product=np.array(text_tf.todense()).flatten()*np.array(sow_tf).flatten()
        important_terms_tf=list(reversed(np.argsort(product)))[:10]
        important_terms_tf=[vocab_map[x] for x in important_terms_tf]
        all_important_terms_tf.append(important_terms_tf)
        sim_tf=sim_tf.flatten()[0]
        sim_keyword,product_keyword=use.get_cosine(phrase_dict,sow_phrase_dict)
        keys=product_keyword.keys()
        values=product_keyword.values()
        important_terms_keyword = list(reversed(np.argsort(values)))
        important_terms_keyword=[keys[x] for x in important_terms_keyword]
        all_important_terms_keywords.append(important_terms_keyword)
        sim=(labmbda*sim_tf)+((1-labmbda)*sim_keyword)
        similarities.append(sim)


    # ===================== rank the documents and print the top n =======================

    ranked_docs=list(reversed(np.argsort(similarities)))
    results_sim=[]
    results_index=[]
    for i in range(n_results):
        index=ranked_docs[i]
        # print similarities[index]
        results_sim.append(format(100*similarities[index],'.2f'))
        results_index.append(index)
        # print all_important_terms_tf[index]
        # print all_important_terms_keywords[index]

    return results_index,results_sim
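A hedged usage sketch: both entry points take the SOW text, a lambda that weights the tf-idf similarity against the keyword similarity, and the number of documents to return (the input file name below is made up for illustration):

with open('sample_sow.txt') as f:
    sow_text = f.read()

top_idx, top_sim = use_system1(sow_text, 0.5, 10)
top_idx2, top_sim2 = use_system2(sow_text, 0.5, 10)
print(list(zip(top_idx, top_sim)))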