Example #1
def prop(n):
    # Returns True when n, 2n, ..., 6n are all digit permutations of one
    # another (toList appears to return the digits of a number as a list).
    save = toList(n)
    save.sort()

    factors = [2, 3, 4, 5, 6]

    for i in factors:
        tmp = toList(i * n)
        tmp.sort()

        if tmp != save:
            return False

    return True
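The helper toList is not defined in any of these excerpts. In Example #1 (and again in Example #4) it is used as a digit extractor, so a minimal stand-in could look like the sketch below; the real helper in the source project may differ.

# Hypothetical stand-in, assuming toList returns an integer's decimal digits.
def toList(n):
    return [int(d) for d in str(n)]

print(prop(142857))  # True: 142857 and its multiples up to 6x share the same digits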
Example #2
    def predict(self, dataset):
        # Run the model in eval mode over `dataset` and collect, per sample,
        # the input, predicted class, true label and softmax confidence.
        self.model.eval()
        bar = tqdm(dataset, smoothing=0)
        r = []
        for i, (x, y) in enumerate(bar):
            x, y = [createVariable(z, self.use_cuda) for z in [x, y]]

            # Forward pass; softmax over the class dimension gives the
            # per-class probabilities from which the prediction is taken.
            prob = self.model(x)

            prob = F.softmax(prob, dim=1)
            conf, pred = torch.max(prob, 1)
            r.extend(
                zip(x.data.cpu().numpy(), toList(pred), toList(y),
                    toList(conf)))
        bar.close()
        return r
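In Examples #2, #3, #6 and #8 the same name toList is applied to PyTorch tensors / Variables instead (toList(pred), toList(loss)[0], and so on), so there it presumably flattens a tensor into a plain Python list. A minimal stand-in under that assumption:

# Hypothetical stand-in for toList as applied to tensors in these excerpts:
# move to CPU, convert to NumPy and flatten into a Python list.
def toList(t):
    if hasattr(t, 'data'):  # unwrap an old-style autograd Variable
        t = t.data
    return t.cpu().numpy().reshape(-1).tolist()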
Example #3
    def fit(self, dataset):
        # One epoch of supervised training; returns the running loss and
        # training-accuracy meters.
        self.model.train()
        bar = tqdm(dataset, smoothing=0)
        avgLoss = Average('Loss', num=20)
        acc = Average('TAcc')
        for i, (x, y) in enumerate(bar):
            x, y = [createVariable(z, self.use_cuda) for z in [x, y]]

            prob = self.model(x)

            loss = F.cross_entropy(prob, y)
            avgLoss.append(toList(loss)[0])

            # Batch training accuracy (cast to float so the division never
            # truncates to an integer).
            pred = torch.max(prob.data, 1)[1]
            corr = (pred == y.data).sum()
            total = y.size(0)
            acc.append(float(corr) / total)

            self.optimizer.zero_grad()
            loss.backward()
            torch.nn.utils.clip_grad_norm(self.model.parameters(), 10)
            self.optimizer.step()

            logs = logging((avgLoss, acc))
            bar.desc = logs

        bar.close()
        return [avgLoss, acc]
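Average and logging are project helpers that are also not shown. From the way they are called (Average('Loss', num=20), .append(value), .value(), and logging((avgLoss, acc)) producing the tqdm description string) a plausible minimal version is the following sketch; the real implementation may differ.

from collections import deque

# Hypothetical helpers matching how Average/logging are used in these excerpts.
class Average:
    def __init__(self, name, num=100):
        self.name = name
        self.values = deque(maxlen=num)  # keep only the last `num` values

    def append(self, v):
        self.values.append(v)

    def value(self):
        return sum(self.values) / len(self.values) if self.values else 0.0

def logging(meters):
    # Render e.g. "Loss: 0.3421 TAcc: 0.8750" for the progress-bar description.
    return ' '.join('%s: %.4f' % (m.name, m.value()) for m in meters)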
Example #4
    def score(self, n):
        # For each digit-replacement pattern, count how many of the ten
        # substitution digits give a prime >= n; return the best count and the
        # smallest prime found for that best pattern.
        list_n = toList(n)

        # Grow the pattern table if n has more digits than the patterns cover.
        if len(list_n) > len(self.patterns[0]):
            self.upgrade_patterns(len(list_n))

        max_score = 0
        min_prime = n**2  # sentinel upper bound

        for pattern in self.patterns:
            # print(pattern)
            local_score = 0
            local_min = min_prime
            for i in range(10):
                replaced = self.replace(list_n, pattern, i)

                if replaced >= n and self.pc.prime(replaced):
                    # print(replaced)
                    local_min = min(local_min, replaced)
                    local_score += 1

            # print(local_score)
            if local_score > max_score:
                max_score = local_score
                min_prime = local_min

        return max_score, min_prime
Example #5
def movingAverage(interval, window_size, factor):
    interval = pu.toList(interval)
    if window_size % 2 == 0:  # force an odd window so it has a centre element
        window_size += 1

    # Symmetric window whose weights decay by `factor` at each step away from
    # the centre, normalised to sum to 1.
    window = np.ones(int(window_size))
    half = int(window_size / 2)
    center = half + 1
    for i in range(1, center):
        window[half + i] = window[half + i - 1] / float(factor)
        window[half - i] = window[half - i + 1] / float(factor)
    s = np.sum(window)
    window = [x / s for x in window]

    # Pad both ends with the local mean so the convolution does not fade out
    # at the boundaries, then drop the padded region again.
    head = [np.mean(interval[:window_size])] * window_size
    tail = [np.mean(interval[-window_size:])] * window_size
    conv = np.convolve(head + interval + tail, window, 'same')
    retVal = conv[window_size:-window_size]
    return pu.toList(retVal)
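To make the weighting concrete, the loop above builds a kernel whose weights shrink by factor at each step away from the centre and are then normalised. Reproducing just that part for window_size=5 and factor=2 (assuming NumPy is imported as np, as the function itself requires):

import numpy as np

window_size, factor = 5, 2
window = np.ones(window_size)
half = window_size // 2
for i in range(1, half + 1):
    window[half + i] = window[half + i - 1] / factor
    window[half - i] = window[half - i + 1] / factor
window /= window.sum()
print(window)  # [0.1 0.2 0.4 0.2 0.1]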
Example #6
    def predict(self, illums):
        # Generate one sample per requested illumination value and record the
        # discriminator's realness / illumination estimates for it.
        # FIXME:
        self.G.train()
        self.D.eval()
        bar = tqdm(illums, smoothing=0)
        r = []
        for i, illum in enumerate(bar):
            illum = torch.FloatTensor([illum])

            # Run the generator for this illumination (inference only; no
            # parameter updates happen here)
            noise = createVariable(torch.randn(1, noiseDim), self.use_cuda,
                                   True)
            illum = createVariable(illum, self.use_cuda, True)

            x = self.G(noise, illum)
            #  x = torch.clamp(x, 0, 1)
            isReal, illum = self.D(x)
            r.append(
                (x.data.cpu().numpy()[0], toList(isReal)[0], toList(illum)[0]))

        bar.close()
        return r
Example #7
def addFile(db, samid, procid, file, sample_name, file_type):

    ## Make sure file is a list
    files = utils.toList(file)

    ## Open the connection once, get the cursor
    conn = sqlite3.connect(db, timeout=1000)
    c = conn.cursor()

    for f in files:

        ## Compute md5sum, file size in bytes
        file_md5 = utils.md5(f)
        file_size = os.path.getsize(f)

        ## Add file to table and commit it
        c.execute(
            '''INSERT INTO file (sample_id, process_id, file_path, file_name, file_md5, file_bytes, file_type) VALUES (?, ?, ?, ?, ?, ?, ?)''',
            (samid, procid, f, sample_name, file_md5, file_size, file_type))
        conn.commit()

    ## Close the connection
    conn.close()
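The INSERT above requires a file table with exactly the seven columns it names. The table definition is not part of this excerpt; a minimal schema that would accept these rows might look like the sketch below (the database filename and the column types are assumptions).

import sqlite3

conn = sqlite3.connect('samples.db')  # placeholder path
conn.execute('''CREATE TABLE IF NOT EXISTS file (
    sample_id   INTEGER,
    process_id  INTEGER,
    file_path   TEXT,
    file_name   TEXT,
    file_md5    TEXT,
    file_bytes  INTEGER,
    file_type   TEXT
)''')
conn.commit()
conn.close()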
Example #8
    def fit(self, dataset):
        # One epoch of adversarial training: alternately update the
        # discriminator D (real/fake classification, illumination regression
        # and a gradient penalty) and the generator G.
        bar = tqdm(dataset, smoothing=0)
        avgDLoss = Average('DL', num=4)     # discriminator loss
        realRealAcc = Average('DR', num=4)  # mean "real" score D gives real data
        avgGLoss = Average('GL', num=4)     # generator loss
        fakeRealAcc = Average('GR', num=4)  # mean "real" score D gives fakes
        realIlluAcc = Average('TI', num=4)  # illumination MSE on real data
        fakeIlluAcc = Average('GI', num=4)  # illumination MSE on fakes
        for i, (x, y) in enumerate(bar):
            self.step += 1
            batchSZ = y.size(0)
            x, y = [createVariable(z, self.use_cuda) for z in [x, y]]
            true = createVariable(torch.ones(batchSZ).float(), self.use_cuda)
            false = createVariable(torch.zeros(batchSZ).float(), self.use_cuda)

            # lr decay
            if self.step % 50000 == 0:
                for param_group in self.optimD.param_groups:
                    param_group['lr'] = param_group['lr'] * 0.5
                for param_group in self.optimG.param_groups:
                    param_group['lr'] = param_group['lr'] * 0.5

            #  tagger pretrain
            #  if self.step < 4000:
            #  self.G.eval()
            #  self.D.train()
            #  self.optimD.zero_grad()
            #  dloss = 0

            #  # Real data
            #  isReal, tags = self.D(x)
            #  lossHair = F.cross_entropy(tags[:, 0, :], y[:, 0])
            #  lossEyes = F.cross_entropy(tags[:, 1, :], y[:, 1])
            #  realHairAcc.append(toList((torch.max(tags[:, 0, :], 1)[1] == y[:, 0]).sum())[0] / batchSZ)
            #  realEyesAcc.append(toList((torch.max(tags[:, 1, :], 1)[1] == y[:, 1]).sum())[0] / batchSZ)
            #  lossRealTags = lossHair * 0.6 + lossEyes
            #  loss = lossRealTags
            #  dloss += loss.data.cpu().numpy().tolist()[0]
            #  loss.backward()

            #  # Gradient penalty
            #  alpha = createVariable(torch.rand(batchSZ, 1, 1, 1), self.use_cuda)
            #  beta = createVariable(torch.randn(x.size()), self.use_cuda)
            #  gradientPenalty = 0

            #  x = alpha * x + (1 - alpha) * (x + 0.5 * x.std() * beta)
            #  x = x.detach()
            #  x.requires_grad = True
            #  isReal, tags = self.D(x)
            #  hair = tags[:,0,:12]
            #  eyes = tags[:,1,:11]

            #  hairGrad = createVariable(torch.ones(batchSZ, 12).float(), self.use_cuda)
            #  hairGrad = grad(hair, x, hairGrad, create_graph=True,
            #  retain_graph=True, only_inputs=True)[0].view(batchSZ, -1)
            #  gradientPenalty += ((hairGrad.norm(p=2, dim=1) - 1)**2).mean()

            #  eyesGrad = createVariable(torch.ones(batchSZ, 11).float(), self.use_cuda)
            #  eyesGrad = grad(eyes, x, eyesGrad, create_graph=True,
            #  retain_graph=True, only_inputs=True)[0].view(batchSZ, -1)
            #  gradientPenalty += ((eyesGrad.norm(p=2, dim=1) - 1)**2).mean()

            #  gradientPenalty *= 0.5
            #  dloss += gradientPenalty.data.cpu().numpy().tolist()[0]
            #  gradientPenalty.backward()

            #  avgDLoss.append(dloss)
            #  torch.nn.utils.clip_grad_norm(self.D.parameters(), 1)
            #  self.optimD.step()
            #  logs = logging((avgDLoss, avgGLoss, realRealAcc, fakeRealAcc, realHairAcc, fakeHairAcc, realEyesAcc, fakeEyesAcc))
            #  bar.desc = logs
            #  continue

            lambdaAdvMax = 1
            #  lambdaAdv = min(1, self.step / 4000) ** 2
            #  lambdaAdv = lambdaAdv * 0.8 + 0.2
            #  lambdaAdv = lambdaAdv * lambdaAdvMax
            lambdaAdv = lambdaAdvMax

            skipD = False

            if lambdaAdv >= lambdaAdvMax - 1e-10:
                # Occasionally skip the discriminator update when it is far
                # ahead of the generator (large real-vs-fake score gap).
                gap = max(realRealAcc.value() - fakeRealAcc.value(), 0)
                gap = min(1, gap * 2)
                if random.random() > 1 - gap * 0.9:
                    skipD = True

            if not skipD:
                for _ in range(1):
                    # Training Discriminator
                    self.G.eval()
                    self.D.train()
                    self.optimD.zero_grad()
                    self.optimG.zero_grad()
                    dloss = 0

                    # Real data
                    isReal, illum = self.D(x)
                    lossRealLabel = F.binary_cross_entropy_with_logits(
                        isReal, true)
                    realRealAcc.append(toList(F.sigmoid(isReal).mean())[0])
                    lossIllu = F.mse_loss(illum, y)
                    realIlluAcc.append(toList(lossIllu)[0])
                    loss = lossRealLabel * lambdaAdv + lossIllu
                    dloss += loss.data.cpu().numpy().tolist()[0]
                    loss.backward()

                    # Gradient penalty: perturb the real batch with noise and
                    # push the norm of D's gradient at those points towards 1.
                    alpha = createVariable(torch.rand(batchSZ, 1, 1, 1),
                                           self.use_cuda)
                    beta = createVariable(torch.randn(x.size()), self.use_cuda)
                    gradientPenalty = 0

                    x = alpha * x + (1 - alpha) * (x + 0.5 * x.std() * beta)
                    x = x.detach()
                    x.requires_grad = True
                    isReal, illum = self.D(x)
                    #  isReal = F.sigmoid(isReal)

                    realGrad = grad(isReal,
                                    x,
                                    true,
                                    create_graph=True,
                                    retain_graph=True,
                                    only_inputs=True)[0].view(batchSZ, -1)
                    gradientPenalty += ((realGrad.norm(p=2, dim=1) -
                                         1)**2).mean()

                    gradientPenalty *= 0.5
                    dloss += gradientPenalty.data.cpu().numpy().tolist()[0]
                    gradientPenalty.backward()

                    # Fake data
                    noise = createVariable(torch.randn(batchSZ, noiseDim),
                                           self.use_cuda)
                    illum = createVariable(
                        torch.FloatTensor(batchSZ).uniform_(0.3, 1),
                        self.use_cuda)

                    x = self.G(noise, illum)
                    #  x = torch.clamp(x, 0, 1)
                    x = x.detach()

                    isReal, illum = self.D(x)
                    lossRealLabel = F.binary_cross_entropy_with_logits(
                        isReal, false)

                    loss = lossRealLabel * lambdaAdv
                    loss = loss * 0.1
                    dloss += loss.data.cpu().numpy().tolist()[0]
                    loss.backward()

                    # Fake data history: also show D a batch of earlier
                    # generator outputs from the replay buffer.
                    if len(self.memory) > batchSZ:
                        x = random.sample(self.memory, batchSZ)
                        x = createVariable(torch.stack(x, 0), self.use_cuda)

                        isReal, illum = self.D(x)
                        lossRealLabel = F.binary_cross_entropy_with_logits(
                            isReal, false)

                        loss = lossRealLabel * lambdaAdv
                        loss = loss * 0.9
                        dloss += loss.data.cpu().numpy().tolist()[0]
                        loss.backward()

                    avgDLoss.append(dloss)
                    torch.nn.utils.clip_grad_norm(self.D.parameters(), 1)
                    self.optimD.step()

            # Training Generator (use a separate loop index so the batch
            # index i from enumerate() above is not shadowed)
            for g_iter in range(1):
                self.optimD.zero_grad()
                self.optimG.zero_grad()
                self.D.eval()
                self.G.train()
                noise = createVariable(torch.randn(batchSZ, noiseDim),
                                       self.use_cuda)
                illum = createVariable(
                    torch.FloatTensor(batchSZ).uniform_(0.3, 1), self.use_cuda)
                gloss = 0

                x = self.G(noise, illum)
                isReal, _illum = self.D(x)

                # Keep a bounded replay buffer of generated samples (slice
                # bounds must be ints, not the float 1e6).
                self.memory.append(x[0].data.cpu())
                if len(self.memory) > 1000000:
                    self.memory = self.memory[-1000000:]

                if self.step % 15 == 0 and g_iter == 0:
                    img = x.data[0].cpu().numpy()
                    img, org = toImage(img)
                    try:
                        img.save(
                            os.path.join('output', 'training', 'norm',
                                         '%d-0.jpg' % (self.step)))
                        org.save(
                            os.path.join('output', 'training', 'orig',
                                         '%d-0.jpg' % (self.step)))
                    except Exception:
                        # Snapshot saving is best-effort; ignore failures.
                        pass

                lossRealLabel = F.binary_cross_entropy_with_logits(
                    isReal, true)
                fakeRealAcc.append(toList(F.sigmoid(isReal).mean())[0])

                lossIllu = F.mse_loss(_illum, illum)
                fakeIlluAcc.append(toList(lossIllu)[0])
                loss = lossRealLabel * lambdaAdv + lossIllu
                gloss += loss.data.cpu().numpy().tolist()[0]
                loss.backward()

                avgGLoss.append(gloss)
                torch.nn.utils.clip_grad_norm(self.G.parameters(), 1)
                self.optimG.step()

            logs = logging((avgDLoss, avgGLoss, realRealAcc, fakeRealAcc,
                            realIlluAcc, fakeIlluAcc))
            bar.desc = logs

        bar.close()
        return [
            avgDLoss, avgGLoss, realRealAcc, fakeRealAcc, realIlluAcc,
            fakeIlluAcc
        ]
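The discriminator update in Example #8 includes a gradient penalty computed on noise-perturbed real images. Rewritten against the current autograd API (an assumption, since the excerpt uses the old Variable interface) and assuming D returns a (realness, illumination) pair as above, the same term looks roughly like this:

import torch

def gradient_penalty(D, x, weight=0.5):
    # Random interpolation between the real batch and a noisy copy of it,
    # then penalise how far the norm of D's gradient at those points is from 1.
    alpha = torch.rand(x.size(0), 1, 1, 1, device=x.device)
    x_hat = alpha * x + (1 - alpha) * (x + 0.5 * x.std() * torch.randn_like(x))
    x_hat = x_hat.detach().requires_grad_(True)
    is_real, _ = D(x_hat)
    g, = torch.autograd.grad(is_real.sum(), x_hat, create_graph=True)
    g = g.view(g.size(0), -1)
    return weight * ((g.norm(2, dim=1) - 1) ** 2).mean()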