def predict(opt):
    # net = torch.load('Lenet.pth')  # .pth format; keeps only the parameters
    # print('net', net)
    '''
    is_train in basic_option needs to be changed to False
    '''
    # opt.is_train = False
    acc = 0
    total = 0
    test_dataloader = create_dataloader(opt)
    net = Classification()
    net.load_state_dict(
        torch.load(
            f"./output/train/weights/exp_1/Basic_Epoch_20_Accuracy_0.99.pth"))
    net = net.to(device)
    net.eval()  # inference mode: disables dropout and freezes batch-norm statistics
    with torch.no_grad():
        for index, data in enumerate(test_dataloader, start=1):
            images, labels = data
            images, labels = images.to(device), labels.to(device)
            outputs = net(images)
            _, predicted = torch.max(outputs.data, dim=1)
            print(f'picture {index} is predicted to be: {classes[predicted[0]]}')
            total += labels.size(0)
            acc += (predicted == labels).sum().item()
    print('Accuracy on test set : {}%'.format(100 * acc / total))
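predict() relies on a module-level device and a classes label list that the listing does not show. A minimal sketch of those globals; the digit labels are an assumption (substitute your dataset's classes):

import torch

device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
# assumed label list, purely illustrative
classes = ('0', '1', '2', '3', '4', '5', '6', '7', '8', '9')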
Example #2
    def test_get_season(self):

        # create two seasons
        classification = Classification(label='test mens')
        classification.save()
        competition = Competition(
            name='div 1',
            mode='l',
            classification=classification
        )
        competition.save()
        season_1 = Season(label='s1',
            start_date=datetime.date.today(),
            end_date=datetime.date.today() + datetime.timedelta(365),
            competition=competition,
            published=True
        )
        season_2 = Season(label='s2',
            start_date=datetime.date.today() + datetime.timedelta(365),
            end_date=datetime.date.today() + datetime.timedelta(730),
            competition=competition
        )
        season_1.save()
        season_2.save()

        self.assertIn(season_1,
            Season.get_current_season_by_slugs('test-mens', 'div-1'))
        self.assertNotIn(season_2,
            Season.get_current_season_by_slugs('test-mens', 'div-1'))
Example #3
    def __init__(self,
                 adj_matrix,
                 features=None,
                 labels=None,
                 supervised=False,
                 model='gat',
                 n_layer=2,
                 emb_size=128,
                 random_state=1234,
                 device='auto',
                 epochs=5,
                 batch_size=20,
                 sample_size=10,
                 lr=0.7,
                 unsup_loss_type='margin',
                 print_progress=True):
        super(GNN, self).__init__()
        # fix random seeds
        random.seed(random_state)
        np.random.seed(random_state)
        torch.manual_seed(random_state)
        torch.cuda.manual_seed_all(random_state)
        # set parameters
        self.supervised = supervised
        self.lr = lr
        self.epochs = epochs
        self.batch_size = batch_size
        self.sample_size = sample_size
        self.unsup_loss_type = unsup_loss_type
        self.print_progress = print_progress
        self.gat = True if model == 'gat' else False
        # set device
        if device == 'auto':
            self.device = torch.device(
                "cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device

        # load data
        self.dl = DataLoader(adj_matrix, features, labels, supervised,
                             self.device)

        self.gnn = GraphSage(n_layer,
                             emb_size,
                             batch_size,
                             sample_size,
                             self.dl,
                             self.device,
                             gat=self.gat)
        self.gnn.to(self.device)

        if supervised:
            n_classes = len(set(labels))
            self.classification = Classification(emb_size, n_classes)
            self.classification.to(self.device)
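A minimal construction sketch for this variant of the class (the one that takes sample_size), assuming GNN is importable as shown and using a toy ring graph:

import numpy as np
import scipy.sparse as sp

# 4-node ring graph
adj = sp.csr_matrix(np.array([[0, 1, 0, 1],
                              [1, 0, 1, 0],
                              [0, 1, 0, 1],
                              [1, 0, 1, 0]]))
gnn = GNN(adj, labels=[0, 1, 0, 1], supervised=True, model='graphsage',
          epochs=2, batch_size=2, sample_size=2)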
Example #4
 def cl(self):
     if self._cl:
         return self._cl
     if self.clid:
         clid = self.clid
     else:
         clName = self.request.REQUEST.get('cl')
         if clName:
             self.clid = clid = clName
         else:
             return False
     cl = memcache.get(clid.encode('utf8'))
     if cl is None:
         if clid.isdigit() and len(clid)<4:
             logger.warn("User requested a random classification, not fully implemented")
             cl = classification.random(self.ts.numregions,min(int(clid)+2,MAX_n))
             self._cl = cl
             return cl
         else:
             cl = Classification.get_by_key_name(clid)
             if cl is not None and not memcache.add(clid.encode('utf8'), cl):
                 logger.error("Memcache set failed [ %s ]"%clid)
     if cl is not None:
         if cl.public or cl.owner == self.request.user.username:
             self._cl = cl
             return self._cl
     return False
Example #5
 def real_get(self):
     values = self.request.REQUEST.get('values')
     if not values and self.cl:
         C = Classification.get_by_key_name(self.clid)
         if C and hasattr(C,'notes'):
             values = C.notes
         else:
             values = ','.join(map(str,range(self.cl.n)))
     if self.cs and values:
         delim = self.request.REQUEST.get('delim')
         if not delim: delim = ','
         title = self.request.REQUEST.get('title')
         values = values.split(delim)
         s = '<div style="width:170px; background-color:#cccccc; padding:5px;">'
         if title: s+='<h3 style="margin:0;border:0;padding:0;">%s</h3>'%title
         #for i in range(2,self.cs.n):
         for i,val in enumerate(values):
             r,g,b = map(ord,self.cs.colors[i+2])
             s += '<div style="clear:both; float:left; border: 1px black solid; width:20px; height:20px; background-color:#%.2X%.2X%.2X;"> </div>'%(r,g,b)
             #s += '<div style="height:22px;">'+values[i-2]+'</div>'
             s += '<div style="height:22px;">'+val+'</div>'
         s += '</div>'
         callback = self.request.REQUEST.get('callback')
         if callback:
             s = callback+'({"legend":"%s"})'%s.replace('"','\\"')
         self.write(s)
Example #6
 def remove_classifications(self):
     q = Classification.all(keys_only=True)
     q.filter('tileset',self.ts) 
     if q.count():
         db.delete(q.fetch(INDEX_BATCH_SIZE))
         self.write("<b>Remove Classifications:</b>Removing, refresh until done.<br />")
     else:
         self.write("<b>Remove Classifications: DONE.</b><br />")
Example #7
 def cl(self):
     """ Search for the Classification Name in the Request and try and load it from Memcache or DataStore, return True or False """
     if self._cl:
         return self._cl
     classificationName = self.request.REQUEST.get('cl','DEFAULT')
     if classificationName == 'random':
         self.clid = clid = "cl:random"
     elif classificationName.isdigit():
         self.clid = clid = "cl:__digit:"+self.ts.name+":"+classificationName
     elif self.ts:
         self.clid = clid = "cl:"+self.ts.name+":"+classificationName
     else:
         return False
     cl = memcache.get(clid)
     if cl is not None:
         self._cl = cl
     else:
         cl = None
         if self.ts:
             N = self.ts.idlen
             if clid == 'cl:random':
                 cl = classification.random(N,min(N,MAX_n))
             elif clid.startswith('cl:__digit:'):
                 cl = classification.random(N,min(int(classificationName)+2,MAX_n))
             elif clid.startswith('cl:key_'):
                 C = Classification.get(classificationName[4:])
                 if C:
                     cl = classification.Classification(C.a)
             elif clid:
                 C = Classification.get_by_key_name(clid)
                 if C:
                     cl = classification.Classification(C.a)
             #if cl and clid != 'cl:random':
                 #if not memcache.add(clid, cl, 60):
                 #    logging.error("Memcache set failed [ %s ]"%clid)
             if not cl:
                 cl = classification.random(N,3)
         self._cl = cl
     if cl is not None:
         return cl
     else:
         return False
Example #8
 def post_csv(self,bg=0,br=1):
     user = self.request.user
     POST = self.request.POST
     if 'dat' in POST:
         a = array.array(UNSIGNED_ITEM_TYPES[1])
         dat = map(int,POST['dat'].split(','))
         a.fromlist([bg,br])
         a.fromlist(dat)
         astring = a.tostring()
         cl = Classification("%s:%s"%(user.username,hashlib.md5(astring).hexdigest()))
         cl.tileset = self.ts.key_name
         cl.dat = zlib.compress(astring)
         cl.N = len(a)
         assert (cl.N-2) == self.ts.numregions
         cl.n = max(a)+1
         cl.owner = user.username
         cl.public = True
         cl.expires = False
         cl.date = time.mktime(time.gmtime())
         return cl.put()
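post_csv() packs two reserved slots [bg, br] ahead of the per-region class IDs before compressing. A standard-library sketch of that layout (assuming UNSIGNED_ITEM_TYPES[1] is the unsigned-byte typecode 'B'; Python 3 spelling):

import array
import zlib

a = array.array('B')        # 'B' assumed to match UNSIGNED_ITEM_TYPES[1]
a.fromlist([0, 1])          # reserved slots: bg=0, br=1
a.fromlist([2, 3, 2, 4])    # one class ID (>= 2) per region
blob = zlib.compress(a.tobytes())

restored = array.array('B')
restored.frombytes(zlib.decompress(blob))
assert restored.tolist()[2:] == [2, 3, 2, 4]  # region IDs survive the round trip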
Example #9
def train(opt):

    device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
    train_dataloader, val_dataloader = create_dataloader(opt)
    net = Classification()  # instantiate the network to be trained
    net.to(device)
    net.train()
    loss_function = nn.CrossEntropyLoss()  # cross-entropy loss
    optimizer = optim.Adam(net.parameters(), lr=0.001)  # optimizer (parameters to train, learning rate)

    for epoch in range(opt.num_epochs):  # one epoch is one full pass over the training set
        running_loss = 0.0
        correct = 0
        total = 0
        time_start = time.perf_counter()

        for step, data in enumerate(train_dataloader,
                                    start=0):  # iterate over the training set; step counts from 0
            inputs, labels = data  # unpack the images and labels
            inputs, labels = inputs.to(device), labels.to(device)
            optimizer.zero_grad()  # clear the accumulated gradients

            # forward + backward + optimize
            # outputs = net(inputs.permute(0,1,3,2))  # forward pass on transposed input
            outputs = net(inputs)  # forward pass
            print('outputs.shape', outputs.shape, labels.shape)
            loss = loss_function(outputs, labels)  # compute the loss
            loss.backward()  # backward pass
            optimizer.step()  # update the parameters
            predict_y = torch.max(outputs, dim=1)[1]
            total += labels.size(0)
            correct += (predict_y == labels).sum().item()
            running_loss += loss.item()
            # print statistics

            # print('train_dataloader length: ', len(train_dataloader))
        acc = correct / total
        print('Train on epoch {}: loss:{}, acc:{}%'.format(epoch + 1, running_loss / total, 100 * correct / total))
        # save the trained weights
        if opt.model == 'basic':
            save_weight_name = os.path.join(opt.save_path,
                                            'Basic_Epoch_{0}_Accuracy_{1:.2f}.pth'.format(
                                                epoch + 1,
                                                acc))
        elif opt.model == 'plus':
            save_weight_name = os.path.join(opt.save_path,
                                            'Plus_Epoch_{0}_Accuracy_{1:.2f}.pth'.format(
                                                epoch + 1,
                                                acc))
        torch.save(net.state_dict(), save_weight_name)
    print('Finished Training')
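train() expects an opt carrying at least num_epochs, model, and save_path (plus whatever create_dataloader reads). A minimal argparse sketch; the defaults below are illustrative assumptions:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--num_epochs', type=int, default=20)
parser.add_argument('--model', type=str, default='basic', choices=['basic', 'plus'])
parser.add_argument('--save_path', type=str, default='./output/train/weights/exp_1')
opt = parser.parse_args()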
Example #10
 def real_get(self):
     ts = self.ts
     if ts:
         self.write("Found TileSet: "+ts.name)
         self.write("<br />")
         img_src = '/o.png?ts='+ts.name
         if self.request.get('cl'):
             img_src+= '&cl='+self.request.get('cl')
         if self.request.get('cs'):
             img_src+= '&cs='+self.request.get('cs')
         gmap = self.request.url.replace('/m/','/gmap/')
         self.write("<a href='%s'><img src='%s' /></a><br />"%(gmap,img_src))
         args = {}
         args['name'] = ts.name
         args['pub'] = "Public" if ts.public else "Private"
         args['cLat'] = ts.cLat
         args['cLng'] = ts.cLng
         args['maxZoom'] = ts.maxZoom
         self.write("<b>%(name)s</b> is a <b>%(pub)s</b> TileSet with a centriod of <b>(%(cLng).4f , %(cLat).4f)\
                     </b> and a maximium zoom level of <b>%(maxZoom)d</b>.<br />"%args)
         self.write("Notes: %s<br />"%ts.notes)
         self.write("Source: %s<br />"%ts.source)
         self.write("ID Spreadsheet: <a href='/ids.csv?ts=%s'>ids.csv </a>"%ts.name)
         self.write('<br><br><hr>')
         self.write('<a href="classify?ts=%s">Classify this Tile Set.</a>'%ts.name)
         q = Classification.all(keys_only=True)
         q.filter('tileset',ts)
         q.filter('public',True)
         if q.count():
             self.write("Classifications for this TileSet:<br>")
          #q.filter('expires', False)
         for cl_key in q:
             cl = Classification.get(cl_key)
             if cl.name:
                 cl = cl.name
             else:
                 cl = 'key_%s'%cl_key
             self.write('<A href="/m/?ts=%s&cl=%s">%s</a><br>'%(ts.name,cl,cl))
Example #11
def create_models(samples, dictionary):

    neumf = NeuMF(len(samples), len(dictionary), 0.5, 8, [64, 32, 16, 8])
    attr_nets = dict()
    for key, labels in dictionary.items():
        if labels is None:
            # regression;
            attr_nets[key] = Regression(16)
        else:
            # classification
            # NOTE: the class count doesn't include blank labels
            attr_nets[key] = Classification(16,
                                            len(labels) - 1)
    return neumf, attr_nets
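Usage sketch: dictionary maps each attribute to its label list, or to None for regression targets. The keys and the blank-label convention below are assumptions based on the NOTE in the code:

samples = [('u1', 'i1'), ('u2', 'i2')]            # only len(samples) matters here
dictionary = {'age': None,                        # -> Regression(16)
              'color': ['_blank', 'red', 'blue']} # -> Classification(16, 2)
neumf, attr_nets = create_models(samples, dictionary)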
Example #12
 def get(self,clid=''):
     if clid:
         self.clid = clid
         cl = self.cl
         if cl:
             dat = cl.as_dict()
             out = {}
             for key,val in dat.iteritems():
                 try:
                     if issubclass(type(val),basestring):
                         out[key] = val.encode('utf8')
                     else:
                         out[key] = val
                 except UnicodeDecodeError: pass
             return self.write(out)
         else:
             return self.write({"error":"Classification Not Found"})
     else:
         return self.write({'classifications':Classification.select('owner',self.request.user.username,keys_only=True)})
Example #13
    def add_classification(self):

        text = self.classificationNameInput.text()

        same_classifications = self.session.query(Classification).filter_by(
            name=text).all()

        if len(same_classifications):
            self.close()
            QMessageBox.warning(self, 'تحذير', 'التصنيف موجود بالفعل!')  # 'Warning', 'The classification already exists!'
            return

        classification = Classification(name=text)

        self.session.add(classification)
        self.session.commit()

        QMessageBox.information(self, 'عملية ناجحة', 'تمت الاضافة بنجاح')  # 'Success', 'Added successfully'
        self.close()
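The dialog assumes a SQLAlchemy Classification model with a name column; a plausible declaration (not part of the original example, and the table name is an assumption) might be:

from sqlalchemy import Column, Integer, String
from sqlalchemy.orm import declarative_base

Base = declarative_base()

class Classification(Base):
    __tablename__ = 'classification'
    id = Column(Integer, primary_key=True)
    name = Column(String, unique=True, nullable=False)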
Example #14
    def parse_line(self, line):
        columns = line.split(self.HAZARDS_DELIMITER)

        # Check columns count
        if len(columns) != self.HAZARDS_COL_COUNT:
            self.error = 'Number of columns must be %d' % self.HAZARDS_COL_COUNT
            return False

        # Clean columns
        temp = []
        for col in columns:
            temp.append(col.strip())
        columns = temp

        # Check signal word
        if not HStatement.is_signal_word(columns[self.HAZARDS_COL_SIGNALWORD]):
            self.error = "'%s' is not a signal word!" % columns[
                self.HAZARDS_COL_SIGNALWORD]
            return False

        pictogram_names = columns[self.HAZARDS_COL_PICTOGRAM].strip()
        for pic_name in pictogram_names.split(','):
            pic_name = pic_name.strip()
            image_name = pic_name + self.HAZARDS_PICTOGRAM_TYPE
            image = None
            try:
                image = self.zip_file.read(image_name)
            except KeyError:
                self.error = 'Did not find <b>%s</b> in zip-file.' % image_name
                return False
            else:
                # Parse Pictogram
                pic = Pictogram.load(pic_name, image)

                # Parse H-Statement
                hstatement = HStatement.load(columns[self.HAZARDS_COL_CODE])
                hstatement.statement = columns[self.HAZARDS_COL_HSTATEMENT]
                hstatement.set_signal_word(
                    columns[self.HAZARDS_COL_SIGNALWORD])

                # Parse Class
                cls = Class.load(columns[self.HAZARDS_COL_CLASS])
                cls.pictogram = pic.key

                # Parse Category (can be a list)
                cats = self.parse_category(columns[self.HAZARDS_COL_CATEGORY])
                if cats:
                    for cat in cats:
                        c = Classification(parent=CLASSIFICATION_KEY,
                                           clazz=cls.key,
                                           category=cat.key,
                                           hstatement=hstatement.key)
                        c.put()
                        cat.put()
                else:
                    self.error = "Category '%s' could not be understood." % columns[
                        self.HAZARDS_COL_CATEGORY]
                    return False

                # Store all the entities now that everything is fine
                pic.put()
                hstatement.put()
                cls.put()

        return True
Example #15
    args.add_argument('--max_epoch', type=int, default=10)
    args.add_argument('--batch', type=int, default=2000)
    args.add_argument('--strmaxlen', type=int, default=200)
    args.add_argument('--embedding', type=int, default=8)

    # Select model
    args.add_argument('--model', type=str, default='classification', choices=['regression', 'classification'])
    config = args.parse_args()

    print('HAS_DATASET :', HAS_DATASET)
    print('IS_ON_NSML :', IS_ON_NSML)
    print('DATASET_PATH :', DATASET_PATH)

    model_type = {
        'regression' : Regression(config.embedding, config.strmaxlen),
        'classification' : Classification(config.embedding, config.strmaxlen),
    }

    model = model_type[config.model]
    if GPU_NUM:
        model = model.cuda()

    # DONOTCHANGE: Reserved for nsml use
    bind_model(model, config)

    criterion_type = {
        'regression' : nn.MSELoss(),
        'classification' : nn.CrossEntropyLoss(),
    }
    criterion = criterion_type[config.model]
    optimizer = optim.Adam(model.parameters(), lr=0.01)
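The snippet stops after wiring up the model, criterion, and optimizer; a generic epoch loop over an assumed data_loader of (inputs, labels) batches would look like:

for epoch in range(config.max_epoch):
    for inputs, labels in data_loader:  # data_loader is assumed, not shown above
        optimizer.zero_grad()
        outputs = model(inputs)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()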
Example #16
def create_classification(label):

    classification = Classification(label=label)
    classification.save()

    return classification
Example #17
class GNN(object):
    """Graph Neural Networks that can be easily called and used.
    Authors of this code package:
    Tong Zhao, [email protected]
    Tianwen Jiang, [email protected]
    Last updated: 11/25/2019
    Parameters
    ----------
    adj_matrix: scipy.sparse.csr_matrix
        The adjacency matrix of the graph, where nonzero entries indicate edges.
        The value of each nonzero entry is the number of edges between the two nodes.
    features: numpy.ndarray, optional
        The 2-dimensional numpy array that stores the given raw feature of each node, where the i-th row
        is the raw feature vector of node i.
        When raw features are not given, one-hot degree features will be used.
    labels: list or 1-D numpy.ndarray, optional
        The class label of each node. Used for supervised learning.
    supervised: bool, optional, default False
        Whether to use supervised learning.
    model: {'gat', 'graphsage'}, default 'gat'
        The GNN model to be used.
        - 'graphsage' is GraphSAGE: https://cs.stanford.edu/people/jure/pubs/graphsage-nips17.pdf
        - 'gat' is graph attention network: https://arxiv.org/pdf/1710.10903.pdf
    n_layer: int, optional, default 2
        Number of layers in the GNN
    emb_size: int, optional, default 128
        Size of the node embeddings to be learnt
    random_state: int, optional, default 1234
        Random seed
    device: {'cpu', 'cuda', 'auto'}, default 'auto'
        The device to use.
    epochs: int, optional, default 5
        Number of epochs for training
    batch_size: int, optional, default 20
        Number of nodes per batch for training
    lr: float, optional, default 0.7
        Learning rate
    unsup_loss_type: {'margin', 'normal'}, default 'margin'
        Loss function to be used for unsupervised learning
        - 'margin' is a hinge loss with margin of 3
        - 'normal' is the unsupervised loss function described in the GraphSAGE paper
    print_progress: bool, optional, default True
        Whether to print the training progress
    """
    def __init__(self,
                 adj_matrix,
                 features=None,
                 labels=None,
                 supervised=False,
                 model='gat',
                 n_layer=2,
                 emb_size=128,
                 random_state=1234,
                 device='auto',
                 epochs=5,
                 batch_size=20,
                 lr=0.7,
                 unsup_loss_type='margin',
                 print_progress=True):
        super(GNN, self).__init__()
        # fix random seeds
        random.seed(random_state)
        np.random.seed(random_state)
        torch.manual_seed(random_state)
        torch.cuda.manual_seed_all(random_state)
        # set parameters
        self.supervised = supervised
        self.lr = lr
        self.epochs = epochs
        self.batch_size = batch_size
        self.unsup_loss_type = unsup_loss_type
        self.print_progress = print_progress
        self.gat = True if model == 'gat' else False
        # set device
        if device == 'auto':
            self.device = torch.device(
                "cuda" if torch.cuda.is_available() else "cpu")
        else:
            self.device = device

        # load data
        self.dl = DataLoader(adj_matrix, features, labels, supervised,
                             self.device)

        self.gnn = GraphSage(n_layer,
                             emb_size,
                             self.dl,
                             self.device,
                             gat=self.gat)
        self.gnn.to(self.device)

        if supervised:
            n_classes = len(set(labels))
            self.classification = Classification(emb_size, n_classes)
            self.classification.to(self.device)

    def fit(self):
        train_nodes = copy.deepcopy(self.dl.nodes_train)

        if self.supervised:
            labels = self.dl.labels
            models = [self.gnn, self.classification]
        else:
            unsup_loss = Unsup_Loss(self.dl, self.device)
            models = [self.gnn]
            if self.unsup_loss_type == 'margin':
                num_neg = 6
            elif self.unsup_loss_type == 'normal':
                num_neg = 100

        for epoch in range(self.epochs):
            np.random.shuffle(train_nodes)

            params = []
            for model in models:
                for param in model.parameters():
                    if param.requires_grad:
                        params.append(param)
            optimizer = torch.optim.SGD(params, lr=self.lr)
            optimizer.zero_grad()
            for model in models:
                model.zero_grad()

            batches = math.ceil(len(train_nodes) / self.batch_size)
            visited_nodes = set()
            for index in range(batches):
                if not self.supervised and len(visited_nodes) == len(
                        train_nodes):
                    # finish this epoch if all nodes are visited
                    break
                nodes_batch = train_nodes[index * self.batch_size:(index + 1) *
                                          self.batch_size]
                # extend the node batch for unsupervised learning
                if not self.supervised:
                    nodes_batch = np.asarray(
                        list(
                            unsup_loss.extend_nodes(nodes_batch,
                                                    num_neg=num_neg)))
                visited_nodes |= set(nodes_batch)
                # feed the node batch to the GNN to get the node embeddings
                embs_batch = self.gnn(nodes_batch)
                # calculate loss
                if self.supervised:
                    # supervised learning
                    logists = self.classification(embs_batch)
                    labels_batch = labels[nodes_batch]
                    loss_sup = -torch.sum(
                        logists[range(logists.size(0)), labels_batch], 0)
                    loss_sup /= len(nodes_batch)
                    loss = loss_sup
                else:
                    # unsupervised learning
                    if self.unsup_loss_type == 'margin':
                        loss_net = unsup_loss.get_loss_margin(
                            embs_batch, nodes_batch)
                    elif self.unsup_loss_type == 'normal':
                        loss_net = unsup_loss.get_loss_sage(
                            embs_batch, nodes_batch)
                    loss = loss_net

                if self.print_progress:
                    logging.info(
                        'Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}, Processed Nodes [{}/{}]'
                        .format(epoch + 1, self.epochs, index + 1, batches,
                                loss.item(), len(visited_nodes),
                                len(train_nodes)))

                loss.backward()
                for model in models:
                    nn.utils.clip_grad_norm_(model.parameters(), 5)
                optimizer.step()
                optimizer.zero_grad()
                for model in models:
                    model.zero_grad()

    def generate_embeddings(self):
        nodes = self.dl.nodes_train
        b_sz = 500
        batches = math.ceil(len(nodes) / b_sz)
        embs = []
        for index in range(batches):
            nodes_batch = nodes[index * b_sz:(index + 1) * b_sz]
            with torch.no_grad():
                embs_batch = self.gnn(nodes_batch)
            assert len(embs_batch) == len(nodes_batch)
            embs.append(embs_batch)
        assert len(embs) == batches
        embs = torch.cat(embs, 0)
        assert len(embs) == len(nodes)
        return embs.cpu().numpy()

    def predict(self):
        if not self.supervised:
            print('GNN.predict() is only supported for supervised learning.')
            sys.exit(0)
        nodes = self.dl.nodes_train
        b_sz = 500
        batches = math.ceil(len(nodes) / b_sz)
        preds = []
        for index in range(batches):
            nodes_batch = nodes[index * b_sz:(index + 1) * b_sz]
            with torch.no_grad():
                embs_batch = self.gnn(nodes_batch)
                logists = self.classification(embs_batch)
                _, predicts = torch.max(logists, 1)
                preds.append(predicts)
        assert len(preds) == batches
        preds = torch.cat(preds, 0)
        assert len(preds) == len(nodes)
        return preds.cpu().numpy()
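End-to-end usage sketch for this class, unsupervised on a toy graph (names as defined above; the graph itself is illustrative):

import numpy as np
import scipy.sparse as sp

rows = [0, 1, 1, 2, 2, 3]
cols = [1, 0, 2, 1, 3, 2]
adj = sp.csr_matrix((np.ones(len(rows)), (rows, cols)), shape=(4, 4))
gnn = GNN(adj, supervised=False, model='graphsage', epochs=1, batch_size=2)
gnn.fit()
embs = gnn.generate_embeddings()  # numpy array of shape (n_nodes, emb_size)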
Example #18
batch_size = 16

train_data = gdata.DataLoader(cifar_train.transform_first(transform_train),
                              batch_size=batch_size,
                              shuffle=True)

transform_test = transforms.Compose([
    transforms.ToTensor(),
    transforms.Normalize([0.4914, 0.4822, 0.4465], [0.2023, 0.1994, 0.2010])
])

val_data = gluon.data.DataLoader(cifar_test.transform_first(transform_test),
                                 batch_size=batch_size,
                                 shuffle=False)

if __name__ == '__main__':
    ctx = mx.gpu()
    net = get_model('cifar_resnet20_v1', classes=10, pretrained=True)
    net.collect_params().reset_ctx(ctx)
    net.initialize(ctx=ctx)

    model = Classification(net=net, ctx=ctx)

    model.summary()

    history = model.fit(train_data, 1, val_data)

    history.plot()
    plt.legend()
    plt.show()
Example #19
    # image size 3, 32, 32
    # batch size must be an even number
    # shuffle must be True

    cifar_10_train_dt = MyCustomDataset('data',
                                        download=True,
                                        transform=ToTensor())
    cifar_10_train_l = DataLoader(cifar_10_train_dt,
                                  batch_size=batch_size,
                                  shuffle=True,
                                  drop_last=True,
                                  pin_memory=torch.cuda.is_available())

    encoder = Encoder().to(device)
    loss_fn = DeepInfoMaxLoss(1, 0, 1).to(device)
    classification = Classification().to(device)
    encoder_optim = Adam(encoder.parameters(), lr=1e-4)
    loss_optim = Adam(loss_fn.parameters(), lr=1e-4)
    classification_optim = Adam(classification.parameters(), lr=1e-4)

    epoch_restart = 0
    root = Path(r'models')

    if epoch_restart > 0 and root is not None:
        enc_file = root / Path('encoder' + str(epoch_restart) + '.wgt')
        loss_file = root / Path('loss' + str(epoch_restart) + '.wgt')
        classification_loss_file = root / Path('classification_loss' +
                                               str(epoch_restart) + '.wgt')
        encoder.load_state_dict(torch.load(str(enc_file)))
        loss_fn.load_state_dict(torch.load(str(loss_file)))
        classification.load_state_dict(
            torch.load(str(classification_loss_file)))
Example #20
 def real_post(self):
     """ Handles /classify POST requests/
         The POST data must contain "classifcation" and "ts"
         'classification': a string of integers in the range >=2 and <=255.
                             0 and 1 are reserved for background and borders
                             each integer represents the classID of a region,
                             the order of the class ids MUST match the order
                             of the IDS provided by the GET ?noform=1 handler.
     """
     if self.ts:
         cl = self.request.REQUEST.get('classification')
         b64 = self.request.REQUEST.get('b64encode')
         if not cl:
             self.write("Bad Request")
             return self.die(400)
         try:
             if b64 == 'True':
                 a = array.array('B')
                 a.fromstring(b64decode(cl))
                 cl = a.tolist()
             else:
                 cl = cl.strip().split(',')
                 cl = map(int,cl)
             assert len(cl) == self.ts.idlen
             cl = [0,1]+cl
             a = array.array(UNSIGNED_ITEM_TYPES[1])
             a.fromlist(cl)
             a = zlib.compress(a.tostring())
             name = self.request.REQUEST.get('name')
             exp = False
             if not name:
                 name = 'user_%s'%str(time()).replace('.','')
                 exp = True
             if name:
                 clid = 'cl:%s:%s'%(self.ts.name,name)
                 memcache.delete(clid)
                 C = Classification(key_name=clid)
                 C.name = name
                 C.expires = exp
             #else:
             #    key_name = 'cs:%s:%s'%(self.ts.name,str(time()).replace('.',''))
             #    C = Classification(key_name)
             C.tileset = self.ts.key()
             C.a = a
             C.N = len(cl)-2
             C.n = max(cl)+1
             title = self.request.REQUEST.get('title')
             if title:
                 C.title = title
             notes = self.request.REQUEST.get('notes')
             if notes:
                 C.notes = notes
             public = self.request.REQUEST.get('public')
             if public and public == 'False':
                 C.public = False
             elif public:
                 C.public = True
             try:
                 key = C.put()
             except CapabilityDisabledError:
                 logging.error("Capability has been disabled")
                  self.die(500)
             callback = self.request.REQUEST.get("callback")
             if name:
                 if callback:
                     self.write('%s({"key":"%s"})'%(callback,name))
                 else:
                     self.write("Put: %s"%name)
             else:
                 if callback:
                     self.write('%s({"key":"%s"})'%(callback,key))
                 else:
                     self.write("Put: key_%s"%key)
         except:
             raise 
     else:
         self.die(400)
Example #21
        x = self.features(x)
        x = self.output(x)
        return x


mnist_train = gdata.vision.FashionMNIST(train=True,
                                        root=r'../resource/fashion')
mnist_test = gdata.vision.FashionMNIST(train=False,
                                       root=r'../resource/fashion')

transform = gdata.vision.transforms.ToTensor()
train_iter = gdata.DataLoader(dataset=mnist_train.transform_first(transform),
                              shuffle=True,
                              batch_size=128)
test_iter = gdata.DataLoader(mnist_test.transform_first(transform), batch_size=128)

if __name__ == '__main__':
    ctx = mx.gpu()
    net = Net(classes=10)
    net.initialize(ctx=ctx)
    print(net)

    trainer = Trainer(net.collect_params(), 'adam', {'learning_rate': 0.01})
    fun = gloss.SoftmaxCrossEntropyLoss()

    model = Classification(neural=net, fun=fun, opt=trainer)

    model.train(mnist_train.transform_first(transform),
                batch_size=256,
                epochs=32)
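Example #21 opens mid-method; a minimal gluon Net consistent with that tail (a features block feeding an output head, accepting classes=10 as used above) might look like the following. The layer layout is an assumption:

from mxnet.gluon import nn

class Net(nn.HybridBlock):
    def __init__(self, classes=10, **kwargs):
        super(Net, self).__init__(**kwargs)
        self.features = nn.HybridSequential()
        self.features.add(nn.Conv2D(32, kernel_size=3, activation='relu'),
                          nn.MaxPool2D(),
                          nn.Flatten())
        self.output = nn.Dense(classes)

    def hybrid_forward(self, F, x):
        x = self.features(x)
        x = self.output(x)
        return x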
Example #22
    args.add_argument('--strmaxlen', type=int, default=200)
    args.add_argument('--embedding', type=int, default=300)
    args.add_argument('--model_name', type=str, default='RCNN')

    config = args.parse_args()
    config2 = Config()

    dataset = SentenceClassificationDataset('../data/processing_data',
                                            config.strmaxlen)

    print('unique labels = {}'.format(dataset.get_unique_labels_num()))
    print('vocab size = {}'.format(dataset.get_vocab_size()))

    if config.model_name == 'CNN':
        model = Classification(config.embedding, config.strmaxlen,
                               dataset.get_unique_labels_num(),
                               dataset.get_vocab_size())
        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.01)

    elif config.model_name == 'RCNN':
        model = RCNN(config.embedding, config.strmaxlen,
                     dataset.get_unique_labels_num(), dataset.get_vocab_size())
        if config.mode == 'train':
            model = model.cuda()

        criterion = nn.CrossEntropyLoss()
        optimizer = optim.Adam(model.parameters(), lr=0.01)

    elif config.model_name == "DUALRCNN":
        model = DualRCNN(config.embedding, config.strmaxlen,