def __init__(self, num_classes=10):
    """Build a LeNet-style network in which selected layers carry a
    ``skip_quant`` flag so a quantization pass leaves them at full
    precision.

    Args:
        num_classes (int): width of the final classifier output.
    """
    super(ImperativeLenetWithSkipQuant, self).__init__()

    # Explicitly named parameter attributes so weights/biases can be
    # located by name after save/load or program transforms.
    w_conv1 = fluid.ParamAttr(name="conv2d_w_1")
    w_conv2 = fluid.ParamAttr(name="conv2d_w_2")
    w_fc1 = fluid.ParamAttr(name="fc_w_1")
    w_fc2 = fluid.ParamAttr(name="fc_w_2")
    w_fc3 = fluid.ParamAttr(name="fc_w_3")
    b_conv1 = fluid.ParamAttr(name="conv2d_b_1")
    b_conv2 = fluid.ParamAttr(name="conv2d_b_2")
    b_fc1 = fluid.ParamAttr(name="fc_b_1")
    b_fc2 = fluid.ParamAttr(name="fc_b_2")
    b_fc3 = fluid.ParamAttr(name="fc_b_3")

    self.conv2d_0 = Conv2D(
        in_channels=1,
        out_channels=6,
        kernel_size=3,
        stride=1,
        padding=1,
        weight_attr=w_conv1,
        bias_attr=b_conv1)
    # First conv is excluded from quantization.
    self.conv2d_0.skip_quant = True

    self.batch_norm_0 = BatchNorm2D(6)
    self.relu_0 = ReLU()
    self.pool2d_0 = MaxPool2D(kernel_size=2, stride=2)

    self.conv2d_1 = Conv2D(
        in_channels=6,
        out_channels=16,
        kernel_size=5,
        stride=1,
        padding=0,
        weight_attr=w_conv2,
        bias_attr=b_conv2)
    self.conv2d_1.skip_quant = False

    self.batch_norm_1 = BatchNorm2D(16)
    self.relu6_0 = ReLU6()
    self.pool2d_1 = MaxPool2D(kernel_size=2, stride=2)

    self.linear_0 = Linear(
        in_features=400,
        out_features=120,
        weight_attr=w_fc1,
        bias_attr=b_fc1)
    # First FC layer is excluded from quantization.
    self.linear_0.skip_quant = True

    self.leaky_relu_0 = LeakyReLU()

    self.linear_1 = Linear(
        in_features=120,
        out_features=84,
        weight_attr=w_fc2,
        bias_attr=b_fc2)
    self.linear_1.skip_quant = False

    self.sigmoid_0 = Sigmoid()

    self.linear_2 = Linear(
        in_features=84,
        out_features=num_classes,
        weight_attr=w_fc3,
        bias_attr=b_fc3)
    self.linear_2.skip_quant = False

    self.softmax_0 = Softmax()
def __init__(self, num_classes=10, classifier_activation='softmax'):
    """LeNet: two conv/pool feature stages plus a 3-layer FC head.

    Args:
        num_classes (int): classifier output width; if <= 0 the FC head
            is omitted and only ``self.features`` is built.
        classifier_activation (str): currently unused — the activation
            is fixed to Softmax.  # TODO: accept any activation
    """
    super(LeNet, self).__init__()
    self.num_classes = num_classes
    self.features = Sequential(
        Conv2d(1, 6, 3, stride=1, padding=1),
        ReLU(),
        Pool2D(2, 'max', 2),
        Conv2d(6, 16, 5, stride=1, padding=0),
        ReLU(),
        Pool2D(2, 'max', 2))
    if num_classes > 0:
        # Bug fix: the last Linear previously hard-coded 10 outputs,
        # silently ignoring num_classes for any other class count.
        self.fc = Sequential(
            Linear(400, 120),
            Linear(120, 84),
            Linear(84, num_classes),
            Softmax())
def __init__(self, num_classes=10):
    """LeNet with explicitly named parameters, expressed as two
    Sequential stages: conv features followed by an FC classifier.

    Args:
        num_classes (int): width of the final classifier output.
    """
    super(ImperativeLenet, self).__init__()

    # Named parameter attributes so each weight/bias is addressable by
    # a stable name.
    w_conv1 = fluid.ParamAttr(name="conv2d_w_1")
    w_conv2 = fluid.ParamAttr(name="conv2d_w_2")
    w_fc1 = fluid.ParamAttr(name="fc_w_1")
    w_fc2 = fluid.ParamAttr(name="fc_w_2")
    w_fc3 = fluid.ParamAttr(name="fc_w_3")
    b_conv1 = fluid.ParamAttr(name="conv2d_b_1")
    b_conv2 = fluid.ParamAttr(name="conv2d_b_2")
    b_fc1 = fluid.ParamAttr(name="fc_b_1")
    b_fc2 = fluid.ParamAttr(name="fc_b_2")
    b_fc3 = fluid.ParamAttr(name="fc_b_3")

    conv_stage = [
        Conv2D(
            in_channels=1,
            out_channels=6,
            kernel_size=3,
            stride=1,
            padding=1,
            weight_attr=w_conv1,
            bias_attr=b_conv1),
        Pool2D(pool_size=2, pool_type='max', pool_stride=2),
        Conv2D(
            in_channels=6,
            out_channels=16,
            kernel_size=5,
            stride=1,
            padding=0,
            weight_attr=w_conv2,
            bias_attr=b_conv2),
        Pool2D(pool_size=2, pool_type='max', pool_stride=2),
    ]
    fc_stage = [
        Linear(
            in_features=400,
            out_features=120,
            weight_attr=w_fc1,
            bias_attr=b_fc1),
        Linear(
            in_features=120,
            out_features=84,
            weight_attr=w_fc2,
            bias_attr=b_fc2),
        Linear(
            in_features=84,
            out_features=num_classes,
            weight_attr=w_fc3,
            bias_attr=b_fc3),
        Softmax(),
    ]
    self.features = Sequential(*conv_stage)
    self.fc = Sequential(*fc_stage)
def __init__(self, num_classes=10):
    """LeNet variant with BatchNorm and mixed activations, plus
    quantization helper layers (``add`` and ``QuantStub``).

    Args:
        num_classes (int): width of the final classifier output.
    """
    super(ImperativeLenet, self).__init__()

    # Named parameter attributes; the first conv has no bias attr
    # because it is built with bias_attr=False (BN follows it).
    w_conv1 = fluid.ParamAttr(name="conv2d_w_1")
    w_conv2 = fluid.ParamAttr(name="conv2d_w_2")
    w_fc1 = fluid.ParamAttr(name="fc_w_1")
    w_fc2 = fluid.ParamAttr(name="fc_w_2")
    w_fc3 = fluid.ParamAttr(name="fc_w_3")
    b_conv2 = fluid.ParamAttr(name="conv2d_b_2")
    b_fc1 = fluid.ParamAttr(name="fc_b_1")
    b_fc2 = fluid.ParamAttr(name="fc_b_2")
    b_fc3 = fluid.ParamAttr(name="fc_b_3")

    self.features = Sequential(
        Conv2D(
            in_channels=1,
            out_channels=6,
            kernel_size=3,
            stride=1,
            padding=1,
            weight_attr=w_conv1,
            bias_attr=False),  # bias folded away; BN follows
        BatchNorm2D(6),
        ReLU(),
        MaxPool2D(kernel_size=2, stride=2),
        Conv2D(
            in_channels=6,
            out_channels=16,
            kernel_size=5,
            stride=1,
            padding=0,
            weight_attr=w_conv2,
            bias_attr=b_conv2),
        BatchNorm2D(16),
        PReLU(),
        MaxPool2D(kernel_size=2, stride=2))

    self.fc = Sequential(
        Linear(
            in_features=400,
            out_features=120,
            weight_attr=w_fc1,
            bias_attr=b_fc1),
        LeakyReLU(),
        Linear(
            in_features=120,
            out_features=84,
            weight_attr=w_fc2,
            bias_attr=b_fc2),
        Sigmoid(),
        Linear(
            in_features=84,
            out_features=num_classes,
            weight_attr=w_fc3,
            bias_attr=b_fc3),
        Softmax())

    # Quantization-aware helper layers.
    self.add = paddle.nn.quant.add()
    self.quant_stub = paddle.nn.quant.QuantStub()
def __init__(self, mode="train"):
    """Wrap a small CNN binary classifier in a ``paddle.Model``.

    Args:
        mode (str): "train" builds a fresh model; "predict" also loads
            saved weights via ``self.load_weight()``.
    """
    # NOTE(review): the original does not call super().__init__();
    # confirm the base class requires none.
    backbone = paddle.nn.Sequential(
        Conv2D(3, 6, (3, 3)), BatchNorm2D(6), ReLU(),
        Conv2D(6, 6, (3, 3)), BatchNorm2D(6), ReLU(),
        MaxPool2D((2, 2)),
        Conv2D(6, 12, (3, 3)), BatchNorm2D(12), ReLU(),
        Conv2D(12, 12, (3, 3)), BatchNorm2D(12), ReLU(),
        MaxPool2D((2, 2)),
        Conv2D(12, 8, (3, 3)), BatchNorm2D(8), ReLU(),
        Conv2D(8, 8, (3, 3)), BatchNorm2D(8), ReLU(),
        MaxPool2D((2, 2)),
        Flatten(),
        Linear(128, 128), ReLU(),
        Linear(128, 32), ReLU(),
        Linear(32, 2),
        Softmax(),
    )
    # Static input/label specs: NCHW float images, int32 labels.
    x_spec = InputSpec([None, 3, 64, 64], "float32", "x")
    y_spec = InputSpec([None, 1], "int32", "label")
    wrapped = paddle.Model(backbone, inputs=x_spec, labels=y_spec)
    wrapped.prepare(
        paddle.optimizer.Adam(parameters=backbone.parameters()),
        paddle.nn.CrossEntropyLoss(),
        paddle.metric.Accuracy(),
    )
    self.model = wrapped
    if mode == "predict":
        self.load_weight()
def __init__(self, num_classes, classifier_activation='softmax'):
    """VGG-style classifier head: three FC layers ending in Softmax.

    Args:
        num_classes (int): width of the final output layer.
        classifier_activation (str): currently unused — the activation
            is fixed to Softmax.  # TODO: accept any activation
    """
    super(Classifier, self).__init__()
    self.act = Softmax()
    flat_dim = 512 * 7 * 7  # flattened conv feature map (C*H*W)
    self.linear1 = Linear(flat_dim, 4096)
    self.linear2 = Linear(4096, 4096)
    self.linear3 = Linear(4096, num_classes)
from paddle.nn import Conv2D, BatchNorm2D, ReLU, Softmax, MaxPool2D, Flatten, Linear

# Small CNN binary classifier: three conv/BN/ReLU stages with
# max-pooling, then an FC head ending in a 2-way softmax.
ClasModel = paddle.nn.Sequential(
    Conv2D(3, 6, (3, 3)), BatchNorm2D(6), ReLU(),
    Conv2D(6, 6, (3, 3)), BatchNorm2D(6), ReLU(),
    MaxPool2D((2, 2)),
    Conv2D(6, 12, (3, 3)), BatchNorm2D(12), ReLU(),
    Conv2D(12, 12, (3, 3)), BatchNorm2D(12), ReLU(),
    MaxPool2D((2, 2)),
    Conv2D(12, 8, (3, 3)), BatchNorm2D(8), ReLU(),
    Conv2D(8, 8, (3, 3)), BatchNorm2D(8), ReLU(),
    MaxPool2D((2, 2)),
    Flatten(),
    Linear(128, 128), ReLU(),
    Linear(128, 32), ReLU(),
    Linear(32, 2),
    Softmax())

train_dataset = HumanClasDataset(mode="train")
eval_dataset = HumanClasDataset(mode="eval")

model = paddle.Model(ClasModel)
model.prepare(
    paddle.optimizer.Adam(parameters=ClasModel.parameters()),
    paddle.nn.CrossEntropyLoss(),
    paddle.metric.Accuracy())
model.fit(train_dataset, batch_size=32, epochs=10, verbose=2)

# Bug fix: paddle.nn.Layer has no ``static_dict``; the method that
# returns the parameter dict is ``state_dict`` — the original call
# raised AttributeError at save time.
paddle.save(ClasModel.state_dict(), "human.pdparams")

model.evaluate(eval_dataset, verbose=1)

img = cv2.imread("/home/aistudio/plane/bend/p/15351-撤轮挡-0_2937.png")
def __init__(self, classifier_activation='softmax'):
    """Minimal model: one 20->10 linear layer followed by Softmax.

    Args:
        classifier_activation (str): currently unused — the activation
            is fixed to Softmax.  # TODO: accept any activation
    """
    super(MyModel, self).__init__()
    self._act = Softmax()
    self._fc = Linear(20, 10)