Example #1
def ItemSimilarity(train):
    # count users who co-rated each pair of items
    # train is already a user -> items inverted index
    C = dict()
    N = dict()
    # here train is a per-user item dict: {user1: {item1: 0, item2: 0, ...}}
    # i.e. the data is already grouped by user as a user -> items inverted index
    for u, items in train.items():  # iterate over all users
        for i in items:
            N.setdefault(i, 0)
            N[i] += 1  # number of users who like item i
            C.setdefault(i, {})
            for j in items:
                if i == j:
                    continue
                C[i].setdefault(j, 0)
                C[i][j] += 1  # number of users who like both item i and item j

    # calculate the final similarity matrix W
    W = C.copy()
    for i, related_items in C.items():
        for j, cij in related_items.items():
            W[i][j] = cij / math.sqrt(N[i] * N[j])  # penalize popular items
    W = Normalize(W)
    return W
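
Normalize is referenced but not defined in this snippet. Below is a minimal usage sketch: the toy train dict follows the user -> items format described in the comments, and the stand-in Normalize (which rescales each similarity row by its largest entry) is an assumption, not the original implementation.

import math

def Normalize(W):
    # assumed stand-in: rescale every similarity row by its largest entry
    for i, row in W.items():
        peak = max(row.values()) if row else 1.0
        for j in row:
            row[j] /= peak
    return W

# toy user -> items inverted index in the format described above
train = {
    'user1': {'itemA': 0, 'itemB': 0},
    'user2': {'itemA': 0, 'itemC': 0},
    'user3': {'itemB': 0, 'itemC': 0},
}
W = ItemSimilarity(train)
print(W['itemA'])  # co-occurrence similarity of itemA to its neighbours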
Example #2
def main():
    res152 = torch.nn.Sequential(
        Normalize(opt.mean, opt.std),
        models.resnet152(pretrained=True).eval().cuda())
    inc_v3 = torch.nn.Sequential(
        Normalize(opt.mean, opt.std),
        models.inception_v3(pretrained=True).eval().cuda())
    resnext50_32x4d = torch.nn.Sequential(
        Normalize(opt.mean, opt.std),
        models.resnext50_32x4d(pretrained=True).eval().cuda())
    dense169 = torch.nn.Sequential(
        Normalize(opt.mean, opt.std),
        models.densenet169(pretrained=True).eval().cuda())

    X = ImageNet(opt.input_dir, opt.input_csv, transforms)
    data_loader = DataLoader(X,
                             batch_size=opt.batch_size,
                             shuffle=False,
                             pin_memory=True,
                             num_workers=8)
    sum_res152, sum_v3, sum_rext, sum_den = 0, 0, 0, 0

    for images, _, gt_cpu in tqdm(data_loader):
        gt = gt_cpu.cuda()
        images = images.cuda()
        images_min = clip_by_tensor(images - opt.max_epsilon / 255.0, 0.0, 1.0)
        images_max = clip_by_tensor(images + opt.max_epsilon / 255.0, 0.0, 1.0)
        adv_img = graph(images, gt, images_min, images_max)

        with torch.no_grad():
            sum_res152 += (res152(adv_img).argmax(1) !=
                           gt).detach().sum().cpu()
            sum_v3 += (inc_v3(adv_img).argmax(1) != gt).detach().sum().cpu()
            sum_rext += (resnext50_32x4d(adv_img).argmax(1) !=
                         gt).detach().sum().cpu()
            sum_den += (dense169(adv_img).argmax(1) != gt).detach().sum().cpu()

    print('inc_v3 = {:.2%}'.format(sum_v3 / 1000.0))
    print('res152 = {:.2%}'.format(sum_res152 / 1000.0))
    print('dense = {:.2%}'.format(sum_den / 1000.0))
    print('rext = {:.2%}'.format(sum_rext / 1000.0))
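
Several examples in this listing wrap a pretrained backbone in Normalize(opt.mean, opt.std) so that the attack can operate on [0, 1] pixels while the network still sees normalized inputs. The class itself is not part of the listing; a minimal sketch, assuming straightforward per-channel mean/std normalization:

import torch
import torch.nn as nn

class Normalize(nn.Module):
    # assumed implementation: per-channel (x - mean) / std on NCHW inputs
    def __init__(self, mean, std):
        super().__init__()
        self.register_buffer('mean', torch.tensor(mean).view(1, -1, 1, 1))
        self.register_buffer('std', torch.tensor(std).view(1, -1, 1, 1))

    def forward(self, x):
        return (x - self.mean) / self.std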
Example #3
    def __init__(self):
        self.normalize = Normalize()
        self.twitter_filters = TwitterFilters()
        #self.test_list_buscape = self.read_review(path_buscape_reviews)
        self.test_list_tweets = self.read_review(path_tweets_reviews)
        #self.annot_list_buscape = self.read_review(path_buscape_annot)
        self.annot_list_tweets = self.read_review(path_tweets_annot)

        self.tokenizer = Tokenizer()
        #self.sentence_segment = SentenceSegment()
        #self.opphrase = OpPhrasesDetector()
        self.lex = LexLiu()
        self.chi2 = Chi2()
        self.svm = SVM()
Example #4
def updateNewIndexFrom(CurrentLine, LinePosition):
    print("Split into single word at " +
          strftime("%a, %d %b %Y %H:%M:%S", localtime()))
    RawWordsList = CurrentLine.split()
    WordPosition = 0
    for RawWord in RawWordsList:
        print("Normalize %s at position %s" % (RawWord, WordPosition))
        print("Timestamp: " + strftime("%a, %d %b %Y %H:%M:%S", localtime()))
        nWord = Normalize(RawWord)
        print("Add %s to words' dictionary in position %s" %
              (nWord, WordPosition))
        print("Timestamp: " + strftime("%a, %d %b %Y %H:%M:%S", localtime()))
        updateDictionary(nWord, LinePosition, WordPosition)
        WordPosition = WordPosition + 1
    return 1
Example #5
def getIndexFromDatabase(WordIndex, WordList):
    SearchIndex = defaultdict(list)
    for word in WordList:
        word = Normalize(word)
        print("get search word's inversed index from word index")
        print("Timestamp: " + strftime("%a, %d %b %Y %H:%M:%S", localtime()))
        i = 0
        for PositionList in WordIndex[word]:
            if i == 0:
                print("skip word count")
            else:
                SearchIndex[word].append(WordIndex[word][i])
            i = i + 1
        print("Curent index for %s" % word)
        print(SearchIndex[word])
        print("Timestamp: " + strftime("%a, %d %b %Y %H:%M:%S", localtime()))
    return SearchIndex
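
The "skip word count" branch implies that WordIndex stores, for each word, a list whose first element is a count and whose remaining elements are postings. A hypothetical illustration of that assumed layout (the exact posting format is a guess):

# assumed layout: WordIndex[word] == [count, posting, posting, ...]
WordIndex = {
    'example': [2, (0, 3), (4, 1)],  # count, then (line, word-position) pairs
}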
Example #6
def ItemSimilarity(train):
    # count users who co-rated each pair of items
    C = dict()
    N = dict()
    for u, items in train.items():
        for i in items:
            N.setdefault(i, 0)
            N[i] += 1
            C.setdefault(i, {})
            for j in items:
                if i == j:
                    continue
                C[i].setdefault(j, 0)
                # assume active users contribute far less to item-item similarity than inactive users, so penalize them (IUF weighting)
                C[i][j] += 1 / math.log(1 + len(items) * 1.0)

    # calculate the final similarity matrix W
    W = C.copy()
    for i, related_items in C.items():
        for j, cij in related_items.items():
            W[i][j] = cij / math.sqrt(N[i] * N[j])
    W = Normalize(W)
    return W
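
The weighting inside the inner loop is the inverse-user-frequency idea: each co-occurrence is weighted by 1 / log(1 + |items(u)|), so users who rate many items contribute less to every pair they touch. A quick numeric check of that weight:

import math

# weight contributed by one co-occurrence from a user who rated n items
for n in (2, 10, 100):
    print(n, round(1 / math.log(1 + n), 3))  # 2 -> 0.91, 10 -> 0.417, 100 -> 0.217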
Example #7
def graph(x, gt, x_min, x_max):
    eps = opt.max_epsilon / 255.0
    num_iter = opt.num_iter_set
    alpha = eps / num_iter
    alpha_beta = alpha * opt.amplification
    gamma = alpha_beta

    model = torch.nn.Sequential(
        Normalize(opt.mean, opt.std),
        models.inception_v3(pretrained=True).eval().cuda())
    x.requires_grad = True
    amplification = 0.0
    for i in range(num_iter):
        zero_gradients(x)
        output_v3 = model(x)
        loss = F.cross_entropy(output_v3, gt)
        loss.backward()
        noise = x.grad.data

        # MI-FGSM
        # noise = noise / torch.abs(noise).mean([1,2,3], keepdim=True)
        # noise = momentum * grad + noise
        # grad = noise

        amplification += alpha_beta * torch.sign(noise)
        cut_noise = torch.clamp(abs(amplification) - eps, 0,
                                10000.0) * torch.sign(amplification)
        projection = gamma * torch.sign(
            project_noise(cut_noise, stack_kern, kern_size))
        amplification += projection

        # x = x + alpha * torch.sign(noise)
        x = x + alpha_beta * torch.sign(noise) + projection
        x = clip_by_tensor(x, x_min, x_max)
        x = V(x, requires_grad=True)

    return x.detach()
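
clip_by_tensor is used here with per-pixel tensor bounds (x_min, x_max) and in Example #2 with scalar bounds, but it is not defined in the listing. A minimal sketch, assuming an elementwise clamp against possibly tensor-valued bounds:

import torch

def clip_by_tensor(t, t_min, t_max):
    # assumed: clamp each element of t between the (broadcastable) bounds
    t_min = torch.as_tensor(t_min, dtype=t.dtype, device=t.device)
    t_max = torch.as_tensor(t_max, dtype=t.dtype, device=t.device)
    return torch.min(torch.max(t, t_min), t_max)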
Example #8
 def __init__(self):
     Normalize.__init__(self)
     return None
Example #9
        print(model_name)
        if model_name == "VGG16":
            pretrained_model = models.vgg16_bn(pretrained=True)
        elif model_name == 'Resnet18':
            pretrained_model = models.resnet18(pretrained=True)
        elif model_name == 'Squeezenet':
            pretrained_model = models.squeezenet1_1(pretrained=True)
        elif model_name == 'Googlenet':
            pretrained_model = models.googlenet(pretrained=True)
        elif model_name == 'Adv_Denoise_Resnet152':
            pretrained_model = resnet152_denoise()
            loaded_state_dict = torch.load(
                os.path.join('weight', model_name + ".pytorch"))
            pretrained_model.load_state_dict(loaded_state_dict)
        if 'defense' in state and state['defense']:
            net = nn.Sequential(Normalize(mean, std), Permute([2, 1, 0]),
                                pretrained_model)
        else:
            net = nn.Sequential(Normalize(mean, std), pretrained_model)
        nets.append(net)

    model = nn.Sequential(Imagenet_Encoder(), Imagenet_Decoder())

    for i in range(len(nets)):
        nets[i] = torch.nn.DataParallel(nets[i], args.device)
        nets[i].eval()
        nets[i].to(device)

    model = torch.nn.DataParallel(model, args.device)
    model.to(device)
    print(model)
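
Permute([2, 1, 0]) is presumably a small module that reverses the colour channels (RGB to BGR) before the denoising ResNet, which expects a different channel order. A hedged sketch of such a module:

import torch.nn as nn

class Permute(nn.Module):
    # assumed implementation: reorder the channel dimension of NCHW inputs
    def __init__(self, order):
        super().__init__()
        self.order = list(order)

    def forward(self, x):
        return x[:, self.order, :, :]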
Example #10
#coding:utf-8
import numpy as ny
import scipy.io as sio
from SPL_kmeans import kMeans2
from Evaluate import evaluate
from MSPL import MSPL
from Normalize import Normalize

matFile = sio.loadmat("D:\dataSet\segment_uni.mat")
dataSet = ny.mat(matFile['X']).T
dataSet2 = []
dataSet2.append(ny.mat(matFile['X'][:, 0:9]).T)
dataSet2.append(ny.mat(matFile['X'][:, 9:19]).T)
nor = Normalize()
dataSet2 = nor.normalize(dataSet2)
#dataSet2=map(lambda x:x*3.5,nor.normalize2(dataSet2))
dataSet2[1] *= 2
Y = matFile['Y']
realAssment = []
etmp = ny.eye(7)
for i in range(Y.shape[0]):
    realAssment.append(etmp[Y[i, 0] - 1])
realAssment = ny.mat(realAssment).T
centroids = ny.mat(ny.zeros((19, 7)))
dims = [dataSet2[i].shape[0] for i in range(len(dataSet2))]
centroids2 = [ny.mat(ny.zeros((dims[i], 7))) for i in range(len(dataSet2))]
for i in range(centroids.shape[1]):
    index = int(ny.random.rand() * 2310)
    centroids[:, i] = dataSet[:, index]
    for v in range(len(dataSet2)):
        centroids2[v][:, i] = dataSet2[v][:, index]
Example #11
            self.updateV()
            self.updateW()
            times += 1
        print times


warnings.filterwarnings('error')
matFile = sio.loadmat("D:\dataSet\handwritten.mat")
dataSet = []
dataSet.append(matFile['mor'].T)
dataSet.append(matFile['fourier'].T)
dataSet.append(matFile['pixel'].T)
dataSet.append(matFile['kar'].T)
dataSet.append(matFile['profile'].T)
dataSet.append(matFile['zer'].T)
nor = Normalize()
dataSet = map(ny.array, nor.normalize(dataSet))
gnd = matFile['gnd']
realAssment = []
temp = ny.eye(10)
for i in range(gnd.shape[0]):
    realAssment.append(temp[gnd[i, 0]].tolist())
tw = TW_kmeans(10, 30, 7, dataSet)
pur = []
acc = []
nmi = []
for i in range(10):
    try:
        tw.kmeans()
        #tw.tw_kmeans()
        p, a, n = evaluate(ny.mat(tw.assment), ny.mat(realAssment).T)
Example #12
    def __init__(self, para):
        Subnet.__init__(self, para)
        self.layerList = []

        self.fork = Fork2({'instanceName': para['instanceName'] + '_fork'})
        self.layerList.append(self.fork)
        self.skipMode = para['skipMode']
        if self.skipMode == 'conv':
            convPara4 = {
                'instanceName': para['instanceName'] + '_skipConv1',
                'padding': False,
                'padShape': (0, 0),
                'stride': para['skipStride'],
                'outChannel': para['outChannel3'],
                'kernelShape': (1, 1),
                'bias': False
            }
            self.skipConv = Conv2D(convPara4)
            self.skipNorm = Normalize(
                {'instanceName': para['instanceName'] + '_skipNorm'})
            self.skipScale = Scale(
                {'instanceName': para['instanceName'] + '_skipScale'})
            self.layerList.append(self.skipConv)
            self.layerList.append(self.skipNorm)
            self.layerList.append(self.skipScale)

        convPara1 = {
            'instanceName': para['instanceName'] + '_mainConv1',
            'padding': False,
            'padShape': (0, 0),
            'stride': para['stride1'],
            'outChannel': para['outChannel1'],
            'kernelShape': (1, 1),
            'bias': False
        }
        convPara2 = {
            'instanceName': para['instanceName'] + '_mainConv2',
            'padding': True,
            'padShape': (1, 1),
            'stride': 1,
            'outChannel': para['outChannel2'],
            'kernelShape': (3, 3),
            'bias': False
        }
        convPara3 = {
            'instanceName': para['instanceName'] + '_mainConv3',
            'padding': False,
            'padShape': (0, 0),
            'stride': 1,
            'outChannel': para['outChannel3'],
            'kernelShape': (1, 1),
            'bias': False
        }

        self.mainConv1 = Conv2D(convPara1)
        self.mainNorm1 = Normalize(
            {'instanceName': para['instanceName'] + '_mainNorm1'})
        self.mainScale1 = Scale(
            {'instanceName': para['instanceName'] + '_mainScale1'})
        self.mainActivation1 = Activation({
            'instanceName':
            para['instanceName'] + '_mainReLU1',
            'activationType':
            para['activationType']
        })
        self.layerList.append(self.mainConv1)
        self.layerList.append(self.mainNorm1)
        self.layerList.append(self.mainScale1)
        self.layerList.append(self.mainActivation1)

        self.mainConv2 = Conv2D(convPara2)
        self.mainNorm2 = Normalize(
            {'instanceName': para['instanceName'] + '_mainNorm2'})
        self.mainScale2 = Scale(
            {'instanceName': para['instanceName'] + '_mainScale2'})
        self.mainActivation2 = Activation({
            'instanceName':
            para['instanceName'] + '_mainReLU2',
            'activationType':
            para['activationType']
        })
        self.layerList.append(self.mainConv2)
        self.layerList.append(self.mainNorm2)
        self.layerList.append(self.mainScale2)
        self.layerList.append(self.mainActivation2)

        self.mainConv3 = Conv2D(convPara3)
        self.mainNorm3 = Normalize(
            {'instanceName': para['instanceName'] + '_mainNorm3'})
        self.mainScale3 = Scale(
            {'instanceName': para['instanceName'] + '_mainScale3'})
        self.layerList.append(self.mainConv3)
        self.layerList.append(self.mainNorm3)
        self.layerList.append(self.mainScale3)

        self.sum = Sum2({'instanceName': para['instanceName'] + '_sum'})
        self.activation3 = Activation({
            'instanceName': para['instanceName'] + '_outReLU3',
            'activationType': para['activationType']
        })
        self.layerList.append(self.sum)
        self.layerList.append(self.activation3)
        self.bottomInterface = self.fork
        self.topInterface = self.activation3
Example #13
import numpy as ny
import scipy.io as sio
import warnings
from Evaluate import evaluate
from MSPL import MSPL
from Normalize import Normalize

matFile = sio.loadmat("D:\dataSet\handwritten.mat")
dataSet = []
dataSet.append(ny.mat(matFile['mor']).T)
dataSet.append(ny.mat(matFile['fourier']).T)
dataSet.append(ny.mat(matFile['pixel']).T)
dataSet.append(ny.mat(matFile['kar']).T)
dataSet.append(ny.mat(matFile['profile']).T)
dataSet.append(ny.mat(matFile['zer']).T)
nor = Normalize()
dataSet = map(lambda x: x * 15, nor.normalize(dataSet))
#dataSet=[dataSet[i]*20 for i in range(len(dataSet))]
#dataSet=map(lambda x:x*10,nor.normalize2(dataSet))
'''
dims=[dataSet[i].shape[0] for i in range(len(dataSet))]
centroids=[ny.mat(ny.zeros((dims[i],10))) for i in range(len(dataSet))]
for i in range(10):
    index=int(ny.random.rand()*200)+i*200
    #index=int(ny.random.rand()*2000)
    for j in range(len(dataSet)):
        centroids[j][:,i]=dataSet[j][:,index]
'''
gnd = matFile['gnd']
realAssment = []
temp = ny.eye(10)
Example #14
    def __init__(self, para):
        Net.__init__(self, para)
        convPara1 = {
            'instanceName': 'RN18' + '_Conv1',
            'padding': True,
            'padShape': (1, 1),
            'stride': 1,
            'outChannel': para['c1OutChannel'],
            'kernelShape': (3, 3),
            'bias': False
        }
        self.conv1 = Conv2D(convPara1)
        self.norm1 = Normalize({'instanceName': 'RN18' + '_Norm1'})
        self.scale1 = Scale({'instanceName': 'RN18' + '_Scale1'})
        self.activation1 = Activation({
            'instanceName': 'RN18' + '_Activation1',
            'activationType': 'ReLU'
        })
        self.layerList.append(self.conv1)
        self.layerList.append(self.norm1)
        self.layerList.append(self.scale1)
        self.layerList.append(self.activation1)
        convPara2 = {
            'instanceName': 'RN18' + '_Conv2',
            'padding': True,
            'padShape': (1, 1),
            'stride': 2,
            'outChannel': para['c2OutChannel'],
            'kernelShape': (3, 3),
            'bias': False
        }
        self.conv2 = Conv2D(convPara2)
        self.norm2 = Normalize({'instanceName': 'RN18' + '_Norm2'})
        self.scale2 = Scale({'instanceName': 'RN18' + '_Scale2'})
        self.activation2 = Activation({
            'instanceName': 'RN18' + '_Activation2',
            'activationType': 'ReLU'
        })
        self.layerList.append(self.conv2)
        self.layerList.append(self.norm2)
        self.layerList.append(self.scale2)
        self.layerList.append(self.activation2)
        self.rnb1 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB1',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb1)
        self.rnb2 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB2',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb2)
        self.rnb3 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB3',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb3)
        self.rnb4 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB4',
            'skipMode': 'conv',
            'skipStride': 2,
            'stride1': 2,
            'outChannel1': int(para['rnb4OutChannel'] / 4),
            'outChannel2': int(para['rnb4OutChannel'] / 4),
            'outChannel3': para['rnb4OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb4)
        self.rnb5 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB5',
            'skipMode': 'identity',
            'skipStride': 1,
            'stride1': 1,
            'outChannel1': int(para['rnb4OutChannel'] / 4),
            'outChannel2': int(para['rnb4OutChannel'] / 4),
            'outChannel3': para['rnb4OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb5)
        self.pool1 = Pool({
            'instanceName': 'RN18' + '_pool1',
            'poolType': 'ave',
            'stride': para['pSize'],
            'kernelShape': (para['pSize'], para['pSize'])
        })
        self.layerList.append(self.pool1)
        self.fc1 = FullyConnected({
            'instanceName': 'RN18' + '_fc1',
            'outChannel': para['classNum'],
            'bias': True
        })
        self.layerList.append(self.fc1)
        self.softmax = Softmax({'instanceName': 'RN18' + '_softmax'})
        self.layerList.append(self.softmax)

        self.bottomInterface = self.conv1
        self.topInterface = self.softmax
        self.softmax.setNet(self)
Example #15
import warnings
import numpy as ny
import scipy.io as sio
#import time
from Normalize import Normalize
from Evaluate import evaluate
from TW_SPL import TW_kmeans

warnings.filterwarnings('error')
matFile = sio.loadmat("D:\dataSet\handwritten.mat")
dataSet = []
dataSet.append(matFile['mor'].T)
dataSet.append(matFile['fourier'].T)
dataSet.append(matFile['pixel'].T)
dataSet.append(matFile['kar'].T)
dataSet.append(matFile['profile'].T)
dataSet.append(matFile['zer'].T)
nor = Normalize()
dataSet = map(ny.mat, dataSet)
#dataSet=map(lambda x:ny.array(x*6),nor.rowNormalize(dataSet))
dataSet = map(lambda x: ny.array(x * 5), nor.linerNormalize(dataSet))
'''
w=ny.array([1./6**0.5,1./76**0.5,1./240**0.5,1./64**0.5,1./216**0.5,1./47**0.5])
w/=w.sum()
print w
dataSet=map(lambda x,y:x*y,dataSet,w)
'''
gnd = matFile['gnd']
realAssment = []
temp = ny.eye(10)
for i in range(gnd.shape[0]):
    realAssment.append(temp[gnd[i, 0]].tolist())
tw = TW_kmeans(10, 12000 * 25, 7, 0.15, 1.6, dataSet)
Example #16
#coding:utf-8
import scipy.io as sio
from SPL_kmeans import kMeans2
import numpy as ny
from Evaluate import evaluate
from Normalize import Normalize
from MSPL import MSPL

matFile = sio.loadmat("D:\dataSet\yale_mtv.mat")
dataSet = [
    ny.mat(matFile['X'][0, 0]),
    ny.mat(matFile["X"][0, 1]),
    ny.mat(matFile["X"][0, 2])
]
nor = Normalize()
dataSet = nor.normalize(dataSet)
realAssment = ny.mat(ny.zeros((15, 165)))
gt = matFile['gt']
etmp = ny.mat(ny.eye(15))
for i in range(gt.shape[0]):
    realAssment[:, i] = etmp[:, gt[i, 0] - 1]
dims = [dataSet[i].shape[0] for i in range(len(dataSet))]
centroids = [ny.mat(ny.zeros((dims[i], 15))) for i in range(len(dataSet))]
for i in range(15):
    index = int(ny.random.rand() * 165)
    for v in range(len(dataSet)):
        centroids[v][:, i] = dataSet[v][:, index]
dataSet2 = dataSet[0]
centroids2 = centroids[0]
for i in range(1, len(dataSet)):
    dataSet2 = ny.vstack((dataSet2, dataSet[i]))
Example #17
for key, val in weight.items():
    if key.startswith('0.'):
        encoder_weight[key[2:]] = val
    elif key.startswith('1.'):
        decoder_weight[key[2:]] = val

test_loader, nlabels, labels, mean, std = DataLoader.gvision(config)

if 'OSP' in config:
    if config['source_model_name'] == 'Adv_Denoise_Resnet152':
        s_model = resnet152_denoise()
        loaded_state_dict = torch.load(
            os.path.join('weight', config['source_model_name'] + ".pytorch"))
        s_model.load_state_dict(loaded_state_dict)
    if 'defense' in config and config['defense']:
        source_model = nn.Sequential(Normalize(mean, std), Permute([2, 1, 0]),
                                     s_model)
    else:
        source_model = nn.Sequential(Normalize(mean, std), s_model)

encoder = Imagenet_Encoder()
decoder = Imagenet_Decoder()
encoder.load_state_dict(encoder_weight)
decoder.load_state_dict(decoder_weight)

gvision = gvision_wrapper.GvisionWrapper()

encoder.to(device)
encoder.eval()
decoder.to(device)
decoder.eval()
Example #18
    def create_second_inverted(self):
        file = np.read_csv(self.file_names[0])
        stem_end = ['ات', 'ان', 'ترین', 'تر', 'ش', 'یی', 'ها', 'ٔ', '‌ا', '']
        start = ['می ']
        end = [' یمان', ' یم', ' یش', ' یشان', ' یتان', ' ام']
        Norm = Normalize
        lemm = Lemmatizer()
        stem = Stemmer()
        tempfreq = dict()
        self.totalDocs = int(len(file['content']) / 10)
        for i in range(0, self.totalDocs):
            compile_patterns = lambda patterns: [(re.compile(pattern), repl)
                                                 for pattern, repl in patterns]
            clean_text = BeautifulSoup(file['content'][i],
                                       "html.parser").getText()
            regex = re.compile('[a-zA-Z]')
            clean_text = regex.sub(' ', clean_text)
            junk_chars_regex = r'[^a-zA-Z0-9\u0621-\u06CC\u0698\u067E\u0686\u06AF\u200c]'
            remove_junk_characters = (junk_chars_regex, ' ')
            compiled_patterns_after = compile_patterns(
                [remove_junk_characters])
            for pattern, repl in compiled_patterns_after:
                clean_text = pattern.sub(repl, clean_text)

            clean_text = Norm.normalization(clean_text)
            clean_text = clean_text.replace('\u200c', " ")
            clean_text = Normalize.fix_phrases(clean_text)

            for x in start:
                y = x.replace(" ", '\u200c')
                clean_text = clean_text.replace(x, y)
            for x in end:
                y = x.replace(" ", '\u200c')
                clean_text = clean_text.replace(x, y)
            words = clean_text.split()

            for w in words:
                f = 0
                for suffix in stem_end:  # renamed to avoid clobbering the 'end' affix list above
                    if w.endswith(suffix):
                        w = stem.stem(w)
                        f = 1
                        break
                temp = w
                w = lemm.lemmatize((w)).split("#")[0]
                # if (w == "خواه"):
                #     print(temp)
                self.frequency[w] = self.frequency.get(w, 0) + 1  # collection frequency
                self.dictionary.setdefault(w, set()).add(i)  # posting set of document ids
                self.champ.setdefault(w, dict())
                self.champ[w][i] = self.champ[w].get(i, 0) + 1  # per-document term frequency
                if w not in tempfreq:
                    tempfreq[w] = numpy.zeros(self.totalDocs)
                tempfreq[w][i] += 1
        self.tf = tempfreq
        self.dictionary = Norm.remove_stopwords(self.dictionary)
        self.frequency = Norm.remove_stopwords(self.frequency)
        with open('dict.txt', 'w', encoding="utf-8") as f:
            for key in self.dictionary:
                f.write('%s,' % key)
                for j in self.dictionary[key]:
                    f.write("%s " % str(j))
                f.write("\n")

        with open('tf.txt', 'w', encoding="utf-8") as f:
            for key in self.tf:
                f.write('%s,' % key)
                for j in self.tf[key]:
                    f.write("%s " % str(j))
                f.write("\n")

        return self.dictionary, self.totalDocs, self.tf
Example #19
    def initialize_secondinverted(self, samples):
        dictionary = dict()
        frequency = dict()
        cf_formula = dict()
        file = np.read_csv('ir-news-2-4.csv')

        stem_end = ['ات', 'ان', 'ترین', 'تر', 'ش', 'یی', 'ها', 'ٔ', '‌ا', '']
        start = ['می ']
        end = [' یمان', ' یم', ' یش', ' یشان', ' یتان', ' ام']

        lemm = Lemmatizer()
        stem = Stemmer()
        Token_sum = 1
        file_choise = random.randrange(6)
        file = np.read_csv(self.file_names[file_choise])
        b = 0.5
        k = 23
        for i in range(samples):
            if (i % 5000 == 0):
                file_choise = random.randrange(6)
                file = np.read_csv(self.file_names[file_choise])
            doc_choise = random.randrange(len(file))
            compile_patterns = lambda patterns: [(re.compile(pattern), repl)
                                                 for pattern, repl in patterns]
            clean_text = BeautifulSoup(file['content'][doc_choise],
                                       "html.parser").getText()
            regex = re.compile('[a-zA-Z]')
            clean_text = regex.sub(' ', clean_text)
            junk_chars_regex = r'[^a-zA-Z0-9\u0621-\u06CC\u0698\u067E\u0686\u06AF\u200c]'
            remove_junk_characters = (junk_chars_regex, ' ')
            compiled_patterns_after = compile_patterns(
                [remove_junk_characters])
            for pattern, repl in compiled_patterns_after:
                clean_text = pattern.sub(repl, clean_text)

            clean_text = Norm.normalization(clean_text)
            clean_text = clean_text.replace('\u200c', " ")
            clean_text = Normalize.fix_phrases(clean_text)

            for x in start:
                y = x.replace(" ", '\u200c')
                clean_text = clean_text.replace(x, y)
            for x in end:
                y = x.replace(" ", '\u200c')
                clean_text = clean_text.replace(x, y)

            words = clean_text.split()
            Token_sum = Token_sum + len(words)
            self.tokens.append(math.log10(Token_sum))
            self.Heap_line.append(b * math.log10(Token_sum) + math.log10(k))

            for w in words:
                f = 0
                for suffix in stem_end:  # renamed to avoid clobbering the 'end' affix list above
                    if w.endswith(suffix):
                        w = stem.stem(w)
                        f = 1
                        break

                w = lemm.lemmatize((w)).split("#")[0]
                frequency[w] = frequency.get(w, 0) + 1  # collection frequency
                dictionary.setdefault(w, set()).add(doc_choise)  # posting set of document ids
            self.M.append(math.log10(len(dictionary)))
        dictionary = Norm.remove_stopwords(dictionary)
        frequency = Norm.remove_stopwords(frequency)
        frequency = dict(sorted(frequency.items(), key=lambda kv: kv[1]))
        K = 0
        Sum = 0
        for i, key in enumerate(frequency.keys()):
            Sum = Sum + frequency[key]
            if (i == len(frequency) - 1):
                Sum = Sum + frequency[key]
                K = frequency[key]
                break

        for i, key in enumerate(frequency.keys()):
            self.cf_accuall.append(math.log10(frequency[key]))
            cf_formula[key] = math.log10(K / (len(frequency) + 1 - (i + 1)))
            self.cf_list.append(math.log10(K / (len(frequency) + 1 - (i + 1))))
            self.nums.append(math.log10(len(frequency) + 1 - (i + 1)))
Example #20
class RN18(Net):
    '''
    ResNet18 has a total of 18 layers.
    Note that some parameters are predetermined; the parameters that need to be specified are quoted.
    For all ResNetBlock modules, the output size of the stage 1 and stage 2 Conv2D blocks equals
    1/4 of that of the final stage.
    Conv1 - kernel:(3x3), pad:(1,1), stride:1, output: 'c1OutChannel'
    Conv2 - kernel:(3x3), pad:(1,1), stride:2, output: 'c2OutChannel'  # H and W reduced by half
    RNB1 - skipMode:identity, output : 'rnb1OutChannel'
    RNB2 - skipMode:identity, output : same as RNB1
    RNB3 - skipMode:identity, output : same as RNB1
    RNB4 - skipMode:conv, skipStride:2, output : 'rnb4OutChannel' # H and W reduced by half
    RNB5 - skipMode:identity, output : same as RNB4
    pool - average pooling of RNB5 per channel, reducing the output to 'rnb4OutChannel'; 'pSize'
            must be specified and is used as both the stride and the kernel size
    fc - outChannel: 'classNum'
    softmax - final classification layer
    '''
    def __init__(self, para):
        Net.__init__(self, para)
        convPara1 = {
            'instanceName': 'RN18' + '_Conv1',
            'padding': True,
            'padShape': (1, 1),
            'stride': 1,
            'outChannel': para['c1OutChannel'],
            'kernelShape': (3, 3),
            'bias': False
        }
        self.conv1 = Conv2D(convPara1)
        self.norm1 = Normalize({'instanceName': 'RN18' + '_Norm1'})
        self.scale1 = Scale({'instanceName': 'RN18' + '_Scale1'})
        self.activation1 = Activation({
            'instanceName': 'RN18' + '_Activation1',
            'activationType': 'ReLU'
        })
        self.layerList.append(self.conv1)
        self.layerList.append(self.norm1)
        self.layerList.append(self.scale1)
        self.layerList.append(self.activation1)
        convPara2 = {
            'instanceName': 'RN18' + '_Conv2',
            'padding': True,
            'padShape': (1, 1),
            'stride': 2,
            'outChannel': para['c2OutChannel'],
            'kernelShape': (3, 3),
            'bias': False
        }
        self.conv2 = Conv2D(convPara2)
        self.norm2 = Normalize({'instanceName': 'RN18' + '_Norm2'})
        self.scale2 = Scale({'instanceName': 'RN18' + '_Scale2'})
        self.activation2 = Activation({
            'instanceName': 'RN18' + '_Activation2',
            'activationType': 'ReLU'
        })
        self.layerList.append(self.conv2)
        self.layerList.append(self.norm2)
        self.layerList.append(self.scale2)
        self.layerList.append(self.activation2)
        self.rnb1 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB1',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb1)
        self.rnb2 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB2',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb2)
        self.rnb3 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB3',
            'skipMode': 'identity',
            'skipStride': 0,
            'stride1': 1,
            'outChannel1': int(para['rnb1OutChannel'] / 4),
            'outChannel2': int(para['rnb1OutChannel'] / 4),
            'outChannel3': para['rnb1OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb3)
        self.rnb4 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB4',
            'skipMode': 'conv',
            'skipStride': 2,
            'stride1': 2,
            'outChannel1': int(para['rnb4OutChannel'] / 4),
            'outChannel2': int(para['rnb4OutChannel'] / 4),
            'outChannel3': para['rnb4OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb4)
        self.rnb5 = ResNetBlock({
            'instanceName': 'RN18' + '_RNB5',
            'skipMode': 'identity',
            'skipStride': 1,
            'stride1': 1,
            'outChannel1': int(para['rnb4OutChannel'] / 4),
            'outChannel2': int(para['rnb4OutChannel'] / 4),
            'outChannel3': para['rnb4OutChannel'],
            'activationType': 'ReLU'
        })
        self.layerList.append(self.rnb5)
        self.pool1 = Pool({
            'instanceName': 'RN18' + '_pool1',
            'poolType': 'ave',
            'stride': para['pSize'],
            'kernelShape': (para['pSize'], para['pSize'])
        })
        self.layerList.append(self.pool1)
        self.fc1 = FullyConnected({
            'instanceName': 'RN18' + '_fc1',
            'outChannel': para['classNum'],
            'bias': True
        })
        self.layerList.append(self.fc1)
        self.softmax = Softmax({'instanceName': 'RN18' + '_softmax'})
        self.layerList.append(self.softmax)

        self.bottomInterface = self.conv1
        self.topInterface = self.softmax
        self.softmax.setNet(self)

    def stack(self, top, bottom):
        self.top = top
        self.bottom = bottom

        self.conv1.stack(self.norm1, bottom)
        self.norm1.stack(self.scale1, self.conv1)
        self.scale1.stack(self.activation1, self.norm1)
        self.activation1.stack(self.conv2, self.scale1)

        self.conv2.stack(self.norm2, self.activation1)
        self.norm2.stack(self.scale2, self.conv2)
        self.scale2.stack(self.activation2, self.norm2)
        self.activation2.stack(self.rnb1, self.scale2)

        self.rnb1.stack(self.rnb2, self.activation2)
        self.rnb2.stack(self.rnb3, self.rnb1)
        self.rnb3.stack(self.rnb4, self.rnb2)
        self.rnb4.stack(self.rnb5, self.rnb3)
        self.rnb5.stack(self.pool1, self.rnb4)
        self.pool1.stack(self.fc1, self.rnb5)
        self.fc1.stack(self.softmax, self.pool1)
        self.softmax.stack(top, self.fc1)
        self.softmax.setSource(bottom)
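
As a hedged usage sketch, constructing this network only requires the quoted parameters from the docstring; the concrete values below, and any extra keys Net.__init__ might expect, are hypothetical:

para = {
    'instanceName': 'RN18',
    'c1OutChannel': 16,
    'c2OutChannel': 32,
    'rnb1OutChannel': 64,
    'rnb4OutChannel': 128,
    'pSize': 8,      # final average-pool kernel size and stride
    'classNum': 10,
}
net = RN18(para)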
Example #21
from torchvision.datasets import ImageFolder
from torchvision import models, transforms
import warnings
from universal_pert import universal_perturbation
warnings.filterwarnings("ignore")
import numpy as np
from torch.utils.data import DataLoader
import torch


epsilon = 10.0 / 255.0
training_data_path = "input your path (e.g., '../data/ILSVRC2012_train/pick_image/')"
testing_data_path = 'input your path'
mean = [0.485, 0.456, 0.406]
std = [0.229, 0.224, 0.225]
# std = [1.0, 1.0, 1.0]
net = torch.nn.Sequential(Normalize(mean, std), models.inception_v3(pretrained=True).eval()).cuda()

transform = transforms.Compose([
    transforms.Resize((330, 330)),
    transforms.CenterCrop(299),
    transforms.ToTensor()])


print('loading data')
X = ImagetNet(training_data_path, 1000, 10, transforms = transform)

# X = torch.utils.data.DataLoader(
#     ImageFolder(training_data_path, transforms.Compose([
#         transforms.Resize((330, 330)),
#         transforms.CenterCrop(299),
#         transforms.ToTensor(),
Example #22
 def __init__(self, website, camperid, day, dest, page, year, refresh):
     Normalize.__init__(self, website, camperid, day, dest, page, year, refresh)
     return None
Example #23
def main():

    dense = DataParallel(
        torch.nn.Sequential(
            Normalize(opt.mean, opt.std),
            models.densenet121(pretrained=True).eval())).cuda()
    res = DataParallel(
        torch.nn.Sequential(Normalize(opt.mean, opt.std),
                            models.resnet50(pretrained=True).eval())).cuda()
    res101 = DataParallel(
        torch.nn.Sequential(Normalize(opt.mean, opt.std),
                            models.resnet101(pretrained=True).eval())).cuda()
    # wide = DataParallel(torch.nn.Sequential(Normalize(opt.mean, opt.std),
    #                             models.wide_resnet101_2(pretrained=True).eval())).cuda()
    vgg = DataParallel(
        torch.nn.Sequential(Normalize(opt.mean, opt.std),
                            models.vgg19(pretrained=True).eval())).cuda()
    dense169 = DataParallel(
        torch.nn.Sequential(
            Normalize(opt.mean, opt.std),
            models.densenet169(pretrained=True).eval())).cuda()
    eff = DataParallel(
        nn.Sequential(
            Normalize_TF(),
            timm.create_model('tf_efficientnet_b5',
                              pretrained=True).eval())).cuda()

    # lpipsLoss = DataParallel(lpips.LPIPS(net='vgg', verbose=False).cuda())

    X = ImageNet(opt.input_dir, opt.input_csv, transforms)
    data_loader = DataLoader(X,
                             batch_size=opt.batch_size,
                             shuffle=False,
                             pin_memory=True,
                             num_workers=8)
    # sum_dense, sum_res, sum_res101, sum_dense169, sum_vgg, sum_xception, sum_adv, sum_eff, sum_wide = 0,0,0,0,0,0,0,0, 0

    if not os.path.exists(opt.output_dir):
        os.makedirs(opt.output_dir)

    iter = 0
    for images, name, gt_cpu in tqdm(data_loader):
        iter += 1
        gt = gt_cpu.cuda()
        images = images.cuda()
        images_min = clip_by_tensor(images - opt.max_epsilon / 255.0, 0.0, 1.0)
        images_max = clip_by_tensor(images + opt.max_epsilon / 255.0, 0.0, 1.0)
        adv_img = graph(images,
                        gt,
                        images_min,
                        images_max,
                        eff=eff,
                        dense=dense,
                        res50=res,
                        res101=res101,
                        dense169=dense169,
                        vgg=vgg)

        for i in range(len(adv_img)):
            save_img(opt.output_dir + '{}'.format(name[i]),
                     adv_img[i].detach().permute(1, 2, 0).cpu())
Example #24
import numpy as ny
import scipy.io as sio
from Evaluate import evaluate
from MSPL import MSPL
from Normalize import Normalize
from SPL_kmeans import kMeans2
#import time

matFile = sio.loadmat("D:\dataSet\handwritten.mat")
dataSet = []
dataSet.append(ny.mat(matFile['mor']).T)
dataSet.append(ny.mat(matFile['fourier']).T)
dataSet.append(ny.mat(matFile['pixel']).T)
dataSet.append(ny.mat(matFile['kar']).T)
dataSet.append(ny.mat(matFile['profile']).T)
dataSet.append(ny.mat(matFile['zer']).T)
nor = Normalize()
#dataSet=nor.linerNormalize(dataSet);
#dataSet=map(lambda x:x*5,nor.normalize2(dataSet))
dataSet = map(lambda x: x * 8, nor.normalize2(dataSet))
#print map(lambda x:ny.power(x,2).sum(),dataSet)
dims = [dataSet[i].shape[0] for i in range(len(dataSet))]
centroids = [ny.mat(ny.zeros((dims[i], 10))) for i in range(len(dataSet))]
for i in range(10):
    #index=int(ny.random.rand()*200)+i*200
    index = int(ny.random.rand() * 2000)
    for j in range(len(dataSet)):
        centroids[j][:, i] = dataSet[j][:, index]
dataSet2 = dataSet[0]
centroids2 = centroids[0]
for i in range(1, len(dataSet)):
    dataSet2 = ny.vstack((dataSet2, dataSet[i]))
Example #25
class ResNetBlock(Subnet):
    '''
    On the main path, the first convolution block has (1x1) kernel, zero padding. The second 
    convolution block has (3x3) kernel, (1,1) padding and stride of 1. The third convolution 
    block has (1x1) kernel, zero padding and stride of 1. The skip path has either identity mapping
    or a convolution block with (1x1) kernel and zero padding. The number of output channels is 
    the same as that of the third convolution block on the main path.
    
    Parameters required: 
    'instanceName': name of the block
    'skipMode': select the operation on the skip path, 'conv' or 'identity'
    'skipStride': stride of the convolution block on the skip path
    'stride1': stride of the first convolution block on the main path
    'outChannel1': number of output channels of the first convolution block on the main path
    'outChannel2': number of output channels of the second convolution block on the main path
    'outChannel3': number of output channels of the third convolution block on the main path
    'activationType': activation function of the non-linear block, 'ReLU' or 'sigmoid'
    '''

    # 'conv' mode has a convolution block on the skip path. 'identity' mode is strict pass through.
    skipModes = ['conv', 'identity']

    def __init__(self, para):
        Subnet.__init__(self, para)
        self.layerList = []

        self.fork = Fork2({'instanceName': para['instanceName'] + '_fork'})
        self.layerList.append(self.fork)
        self.skipMode = para['skipMode']
        if self.skipMode == 'conv':
            convPara4 = {
                'instanceName': para['instanceName'] + '_skipConv1',
                'padding': False,
                'padShape': (0, 0),
                'stride': para['skipStride'],
                'outChannel': para['outChannel3'],
                'kernelShape': (1, 1),
                'bias': False
            }
            self.skipConv = Conv2D(convPara4)
            self.skipNorm = Normalize(
                {'instanceName': para['instanceName'] + '_skipNorm'})
            self.skipScale = Scale(
                {'instanceName': para['instanceName'] + '_skipScale'})
            self.layerList.append(self.skipConv)
            self.layerList.append(self.skipNorm)
            self.layerList.append(self.skipScale)

        convPara1 = {
            'instanceName': para['instanceName'] + '_mainConv1',
            'padding': False,
            'padShape': (0, 0),
            'stride': para['stride1'],
            'outChannel': para['outChannel1'],
            'kernelShape': (1, 1),
            'bias': False
        }
        convPara2 = {
            'instanceName': para['instanceName'] + '_mainConv2',
            'padding': True,
            'padShape': (1, 1),
            'stride': 1,
            'outChannel': para['outChannel2'],
            'kernelShape': (3, 3),
            'bias': False
        }
        convPara3 = {
            'instanceName': para['instanceName'] + '_mainConv3',
            'padding': False,
            'padShape': (0, 0),
            'stride': 1,
            'outChannel': para['outChannel3'],
            'kernelShape': (1, 1),
            'bias': False
        }

        self.mainConv1 = Conv2D(convPara1)
        self.mainNorm1 = Normalize(
            {'instanceName': para['instanceName'] + '_mainNorm1'})
        self.mainScale1 = Scale(
            {'instanceName': para['instanceName'] + '_mainScale1'})
        self.mainActivation1 = Activation({
            'instanceName':
            para['instanceName'] + '_mainReLU1',
            'activationType':
            para['activationType']
        })
        self.layerList.append(self.mainConv1)
        self.layerList.append(self.mainNorm1)
        self.layerList.append(self.mainScale1)
        self.layerList.append(self.mainActivation1)

        self.mainConv2 = Conv2D(convPara2)
        self.mainNorm2 = Normalize(
            {'instanceName': para['instanceName'] + '_mainNorm2'})
        self.mainScale2 = Scale(
            {'instanceName': para['instanceName'] + '_mainScale2'})
        self.mainActivation2 = Activation({
            'instanceName':
            para['instanceName'] + '_mainReLU2',
            'activationType':
            para['activationType']
        })
        self.layerList.append(self.mainConv2)
        self.layerList.append(self.mainNorm2)
        self.layerList.append(self.mainScale2)
        self.layerList.append(self.mainActivation2)

        self.mainConv3 = Conv2D(convPara3)
        self.mainNorm3 = Normalize(
            {'instanceName': para['instanceName'] + '_mainNorm3'})
        self.mainScale3 = Scale(
            {'instanceName': para['instanceName'] + '_mainScale3'})
        self.layerList.append(self.mainConv3)
        self.layerList.append(self.mainNorm3)
        self.layerList.append(self.mainScale3)

        self.sum = Sum2({'instanceName': para['instanceName'] + '_sum'})
        self.activation3 = Activation({
            'instanceName': para['instanceName'] + '_outReLU3',
            'activationType': para['activationType']
        })
        self.layerList.append(self.sum)
        self.layerList.append(self.activation3)
        self.bottomInterface = self.fork
        self.topInterface = self.activation3

    def stack(self, top, bottom):
        self.top = top
        self.bottom = bottom
        if self.skipMode == 'conv':
            self.fork.fork(self.skipConv, self.mainConv1, bottom)
            self.skipConv.stack(self.skipNorm, self.fork.skip)
            self.skipNorm.stack(self.skipScale, self.skipConv)
            self.skipScale.stack(self.sum.skip, self.skipNorm)
        else:
            self.fork.fork(self.sum.skip, self.mainConv1, bottom)
        # main path
        self.mainConv1.stack(self.mainNorm1, self.fork.main)
        self.mainNorm1.stack(self.mainScale1, self.mainConv1)
        self.mainScale1.stack(self.mainActivation1, self.mainNorm1)
        self.mainActivation1.stack(self.mainConv2, self.mainScale1)

        self.mainConv2.stack(self.mainNorm2, self.mainActivation1)
        self.mainNorm2.stack(self.mainScale2, self.mainConv2)
        self.mainScale2.stack(self.mainActivation2, self.mainNorm2)
        self.mainActivation2.stack(self.mainConv3, self.mainScale2)

        self.mainConv3.stack(self.mainNorm3, self.mainActivation2)
        self.mainNorm3.stack(self.mainScale3, self.mainConv3)
        self.mainScale3.stack(self.sum.main, self.mainNorm3)
        # sum
        if self.skipMode == 'conv':
            self.sum.sum(self.activation3, self.skipScale, self.mainScale3)
        else:
            self.sum.sum(self.activation3, self.fork.skip, self.mainScale3)
        self.activation3.stack(top, self.sum)
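
As a hedged usage sketch, a conv-skip block that halves the spatial resolution could be constructed as follows; only the parameter names come from the docstring, the values are hypothetical:

block = ResNetBlock({
    'instanceName': 'demo_RNB',
    'skipMode': 'conv',    # 'conv' or 'identity'
    'skipStride': 2,
    'stride1': 2,
    'outChannel1': 16,
    'outChannel2': 16,
    'outChannel3': 64,
    'activationType': 'ReLU',
})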