Example no. 1
def plot_model_exploration(model: ModelWrapper,
                           img: np.ndarray,
                           min_z: float = -6,
                           max_z: float = 6,
                           num_steps: int = 10):
    img_height, img_width, channels = model.output_shape
    steps = np.linspace(min_z, max_z, num_steps)
    canvas = np.zeros(
        (model.latent_dim * img_height, num_steps * img_width, channels))
    fig, ax = plt.subplots()
    z = model.get_latent(img)

    for j in range(model.latent_dim):
        z_temp = np.copy(z)

        for i, step in enumerate(steps):
            z_temp[j] = step
            reconstruction = model.get_reconstruction(z_temp)
            canvas[j * img_height:(j + 1) * img_height,
                   i * img_width:(i + 1) * img_width] = reconstruction

    ax.imshow(canvas)
    start_range_y = img_height // 2
    end_range_y = model.latent_dim * img_height + start_range_y
    pixel_range_y = np.arange(start_range_y, end_range_y, img_height)
    ax.set_yticks(pixel_range_y)
    ax.set_yticklabels(list(range(model.latent_dim)), fontsize=8)
    ax.set_xticks([])
    ax.set_xticklabels([])

    return fig, ax
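A minimal usage sketch; `trained_model` and `sample_img` are hypothetical stand-ins for a fitted wrapper and an input image, not names from the source:

# Hypothetical usage of plot_model_exploration.
import matplotlib.pyplot as plt

fig, ax = plot_model_exploration(trained_model, sample_img,
                                 min_z=-6, max_z=6, num_steps=10)
ax.set_title("Reconstructions while sweeping each latent dimension")
fig.savefig("latent_exploration.png", dpi=150)
plt.close(fig)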
Example no. 2
def prepare_model(learning_rate, momentum, checkpoint_file):
    """Prepare a ResNet-34 model with CrossEntropyLoss and SGD.

    Args:
        learning_rate (float): The learning rate for SGD.
        momentum (float): The momentum for SGD.
        checkpoint_file (str or None): If not `None`, the path of the
            checkpoint file to load.

    Returns:
        ModelWrapper: The prepared, wrapped model.
    """
    # Load model.
    resnet = torchvision.models.resnet34()
    resnet.conv1 = torch.nn.Conv2d(1,
                                   64,
                                   kernel_size=3,
                                   stride=1,
                                   padding=1,
                                   bias=False)
    resnet.avgpool = torch.nn.AvgPool2d(2)

    # Prepare loss function and optimizer.
    loss_function = torch.nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(resnet.parameters(),
                                lr=learning_rate,
                                momentum=momentum)

    # Wrap model object and load checkpoint file if provided.
    model = ModelWrapper(resnet, loss_function, optimizer)
    if checkpoint_file:
        model.load(checkpoint_file)

    return model
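A hedged usage sketch; the hyperparameter values are illustrative, not from the source:

# Build a fresh model, or pass a checkpoint path to resume from a checkpoint.
model = prepare_model(learning_rate=0.01, momentum=0.9, checkpoint_file=None)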
Example no. 3
    def __init__(self, data_frame, label_name, feature_names,
                 categorical_feature_names, k=5):
        ModelWrapper.__init__(self, data_frame, label_name,
                              feature_names, categorical_feature_names)

        self.k = k
        self._kfold = None
        self._generate_kfold()
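`_generate_kfold` is not shown in the snippet. A minimal sketch of what it might do, assuming scikit-learn's KFold and a `data_frame` attribute set by the parent class (both assumptions):

    def _generate_kfold(self):
        # Assumed implementation: precompute k train/test index splits
        # over the wrapped DataFrame for later cross-validation runs.
        from sklearn.model_selection import KFold
        self._kfold = list(
            KFold(n_splits=self.k, shuffle=True,
                  random_state=0).split(self.data_frame))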
Example no. 4
def train(args):
    loss_fn = nn.CrossEntropyLoss(ignore_index=0)
    adaptive = ModelWrapper(args, loss_fn, get_loader)
    optimizer = torch.optim.Adam(filter(lambda p: p.requires_grad,
                                        adaptive.model.parameters()),
                                 lr=args.learning_rate,
                                 betas=(args.alpha, args.beta),
                                 weight_decay=args.l2_rate)
    if adaptive.train(optimizer, args):
        return 0
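A sketch of the argparse namespace `train` appears to expect; the attribute names come from the snippet, while types and defaults are assumptions (ModelWrapper may read further attributes from args):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type=float, default=1e-4)
parser.add_argument('--alpha', type=float, default=0.9)    # Adam beta1
parser.add_argument('--beta', type=float, default=0.999)   # Adam beta2
parser.add_argument('--l2_rate', type=float, default=0.0)  # weight decay
args = parser.parse_args()
train(args)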
Example no. 5
def main(args):
    loss_fn = nn.CrossEntropyLoss(ignore_index=0)
    adaptive = ModelWrapper(args, loss_fn)

    with open(args.word2idx_path, 'r') as fr:
        word2idx = json.loads(fr.read())
    with open(args.sememe2idx_path, 'r') as fr:
        sememe2idx = json.loads(fr.read())
    results = ResDataset(args.gen_file_path, word2idx, sememe2idx)
    res_loader = data.DataLoader(dataset=results, batch_size=1, shuffle=False)

    scores = adaptive.score(res_loader)

    with codecs.open(args.output_path, 'w', 'utf-8') as fw:
        fw.write('\n'.join(scores))
    return 0
Example no. 7
def upload():
    print('uploading...')
    fin = request.files['file']
    data = fin.read()
    user_id = request.form['openid']
    user_dir = get_user_dir(user_id)
    if not os.path.isdir(user_dir):
        os.mkdir(user_dir)  # TODO: delete zombie cache

    img = cv2.imdecode(np.frombuffer(data, dtype=np.uint8), cv2.IMREAD_COLOR)
    predicted = ModelWrapper.predict(img).decode('utf-8')
    assets = json.load(open(ASSETS_DIR + ASSETS_JSON_FILE))
    resp_dict = assets[predicted]
    resp_dict.update({'predicted': predicted})
    resp = flask.jsonify(resp_dict)
    print(predicted)
    print(resp)

    with open(user_dir + CACHE_IMAGE_FILE, 'wb') as fout:
        fout.write(data)
    json_data = {
        'predicted': predicted,
        'description': resp_dict['description']
    }
    json.dump(json_data, open(user_dir + CACHE_JSON_FILE, 'w'))
    return resp
Example no. 8
def predict(batch: Batch):
    # Remove empty texts
    batch.texts = list(filter(lambda x: len(x) > 0, batch.texts))
    model = ModelWrapper()
    res = model(batch)

    return {"documents": res}
Example no. 9
    def get(self, id):
        m = ModelWrapper("pickle_model.pkl")

        customer = data[data.cuid == id]

        if len(customer) == 0:
            print("Customer not found")
            return {"conv": 0, "revenue": 0}

        print("Customer found")
        res = m.predict(customer)

        print("res[0]", res[0], "res[1]", res[1])

        mes = metrics(customer)

        return {
            "conv": res[0][0],
            "revenue": str(res[1].values[0]),
            "message": mes
        }
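This `get` method reads like a Flask-RESTful resource (`data` and `metrics` would come from the surrounding module). A hedged sketch of how it might be wired up; the class name and route below are assumptions:

from flask import Flask
from flask_restful import Api, Resource

app = Flask(__name__)
api = Api(app)

class CustomerScore(Resource):
    # Hypothetical resource; the `get` method shown above would live here.
    def get(self, id):
        ...

api.add_resource(CustomerScore, '/customers/<string:id>')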
Example no. 11
    def test_can_fit_model(self):
        """ This test check ability of fitting model in PER to random vector. """
        state_shape = (4, )
        action_space = 2

        model = PrioritizedExperienceReplayTests._create_model(
            state_shape, action_space)
        PER = PrioritizedExperienceReplay(maxlen=1,
                                          model=model,
                                          key_scaling=10,
                                          gamma=1)
        model_wrapper = ModelWrapper(
            model=model, optimizer=K.optimizers.Adam(learning_rate=0.01))
        model_wrapper.compile()

        sample = Sample(action=np.random.randint(0, action_space),
                        state=np.random.rand(state_shape[0]),
                        reward=10,
                        next_state=None)
        PER.add(samples=[sample])

        history_of_loss = []
        fit_vector = np.zeros((action_space, ))
        fit_vector[sample.action] = sample.reward
        for _ in range(100):
            model_wrapper.fit(sample.state, fit_vector)
            history_of_loss.append(PER._loss_calculate(sample=sample))

        for idx, loss in enumerate(history_of_loss[:-1]):
            self.assertGreater(loss, history_of_loss[idx + 1])
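`_create_model` is referenced but not shown. A minimal sketch consistent with the shapes used in the test, assuming `K` is `tensorflow.keras` (both assumptions):

    @staticmethod
    def _create_model(state_shape, action_space):
        # Assumed helper: a small dense Q-network mapping a state vector
        # to one Q-value per action.
        return K.Sequential([
            K.layers.Input(shape=state_shape),
            K.layers.Dense(16, activation='relu'),
            K.layers.Dense(action_space, activation='linear'),
        ])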
Example no. 13
    RNN(HIDDEN_SIZE),
    layers.RepeatVector(3),
    RNN(128, return_sequences=True),
    layers.TimeDistributed(layers.Dense(len(CHARS), activation='softmax'))
])

model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.summary()

train_generator = encode_generator(training_generator, BATCH_SIZE)

hist = model.fit_generator(train_generator,
                           steps_per_epoch=STEPS_PER_EPOCH,
                           epochs=EPOCHS,
                           verbose=1,
                           use_multiprocessing=True,
                           workers=-2,
                           callbacks=callbacks,
                           validation_data=train_generator, validation_steps=30)

score = model.evaluate_generator(encode_generator(
    test_generator, BATCH_SIZE), steps=STEPS_PER_EPOCH)
print(score)

config = build_config(MODEL_NAME, LEARNING_RATE, BATCH_SIZE,
                      EPOCHS, STEPS_PER_EPOCH, score[0], score[1])
wrapper = ModelWrapper(model, config=config)
wrapper.save_model()
Example no. 14
        shuffle=True,
        collate_fn=data.image_label_list_of_masks_collate_function)
    validation_dataset_fid = DataLoader(
        data.Places365(path_to_index_file=args.path_to_places365,
                       index_file_name='val.txt',
                       max_length=6000,
                       validation=True),
        batch_size=args.batch_size,
        num_workers=args.batch_size,
        shuffle=False,
        collate_fn=data.image_label_list_of_masks_collate_function)
    validation_dataset = data.Places365(
        path_to_index_file=args.path_to_places365, index_file_name='val.txt')
    # Init model wrapper
    model_wrapper = ModelWrapper(
        generator=generator,
        discriminator=discriminator,
        vgg16=vgg16,
        training_dataset=training_dataset,
        validation_dataset=validation_dataset,
        validation_dataset_fid=validation_dataset_fid,
        generator_optimizer=generator_optimizer,
        discriminator_optimizer=discriminator_optimizer)
    # Perform training
    if args.train:
        model_wrapper.train(epochs=args.epochs, device=args.device)
    # Perform testing
    if args.test:
        print('FID=', model_wrapper.validate(device=args.device))
        model_wrapper.inference(device=args.device)
Example no. 15
class UiOutputDialog(QDialog):
    def __init__(self):
        """ Khởi động ứng dụng """
        super(UiOutputDialog, self).__init__()
        self.model_wrapper = ModelWrapper(model_path)

        self.capture = None

        self.timer = QTimer(self)
        self.timer.timeout.connect(self.update_frame)

        loadUi("window.ui", self)

        self.holistic = mp.solutions.holistic.Holistic()
        self.face = mp.solutions.face_mesh.FaceMesh(max_num_faces=100)
        self.hand = mp.solutions.hands.Hands(max_num_hands=100)

        now = QDate.currentDate()
        current_date = now.toString('ddd dd MMMM yyyy')
        current_time = datetime.datetime.now().strftime("%I:%M %p")
        self.Date_Label.setText(current_date)
        self.Time_Label.setText(current_time)

        self.image = None

        self.settings = UISettings()
        self.Settings_Button.clicked.connect(
            lambda: self.handle_setting_button())

        self.warnings = []
        self.prev_warning_code = -1
        self.prev_warning_code_1 = -1
        self.prev_warning_code_2 = -1
        self.warning_history = UIWarnings()
        self.Warning_History.clicked.connect(
            lambda: self.handle_warning_history_button())

        self.net = cv2.dnn.readNet("yolov3-tiny.weights", "yolov3-tiny.cfg")
        with open("yolov3.txt", 'r') as f:
            self.classes = [line.strip() for line in f.readlines()]
        self.colors = np.random.uniform(0, 255, size=(len(self.classes), 3))

    def handle_warning_history_button(self):
        self.pause_video()
        self.warning_history.handle_show(self.warnings)

    def handle_setting_button(self):
        self.pause_video()
        self.settings.show()

    def init_video(self, camera):
        self.capture = camera

    def start_video(self):
        self.timer.start(1)

    def pause_video(self):
        self.timer.stop()

    def process_frame(self, img):

        skeletons = self.model_wrapper.process_image(img)

        skeleton_drawer = vis.SkeletonDrawer(img, draw_config)
        for skeleton in skeletons:
            skeleton.draw_skeleton(skeleton_drawer.joint_draw,
                                   skeleton_drawer.kpt_draw)
        return img

    def face_rec(self, image):
        """
        Nhận diện dáng người và tay từ ảnh đã cho
        :param image: ảnh từ camera
        :return: image: ảnh đuọc xử lý sau khi nhận diện
        """
        width = image.shape[1]
        height = image.shape[0]

        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)

        raw_img = image
        image = self.process_frame(image)

        # Start detection
        faces = self.face.process(raw_img)
        hands = self.hand.process(raw_img)

        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

        if not faces.multi_face_landmarks:
            self.Warnings_List.clear()
            return image

        count = 0
        for face_landmarks in faces.multi_face_landmarks:
            if self.settings.print_results:
                draw_landmarks(image, face_landmarks,
                               mp.solutions.face_mesh.FACEMESH_CONTOURS)
            """
            nose_tip = 1
            chin = 199
            left_eye_outer = 249
            right_eye_outer = 7
            mouth_left = 291
            mouth_right = 61
            """
            nose_tip = normalize_to_pixel_coordinates(
                face_landmarks.landmark[1].x, face_landmarks.landmark[1].y,
                width, height)
            chin = normalize_to_pixel_coordinates(
                face_landmarks.landmark[199].x, face_landmarks.landmark[199].y,
                width, height)
            left_eye_outer = normalize_to_pixel_coordinates(
                face_landmarks.landmark[249].x, face_landmarks.landmark[249].y,
                width, height)
            right_eye_outer = normalize_to_pixel_coordinates(
                face_landmarks.landmark[7].x, face_landmarks.landmark[7].y,
                width, height)
            mouth_left = normalize_to_pixel_coordinates(
                face_landmarks.landmark[291].x, face_landmarks.landmark[291].y,
                width, height)
            mouth_right = normalize_to_pixel_coordinates(
                face_landmarks.landmark[61].x, face_landmarks.landmark[61].y,
                width, height)

            if (nose_tip is None or chin is None or left_eye_outer is None
                    or right_eye_outer is None or mouth_left is None
                    or mouth_right is None):
                return image

            image_points = np.array([
                nose_tip, chin, left_eye_outer, right_eye_outer, mouth_left,
                mouth_right
            ],
                                    dtype="double")

            focal_length = width
            camera_matrix = np.array(
                [[focal_length, 0, width / 2], [0, focal_length, height / 2],
                 [0, 0, 1]],
                dtype="double")

            dist_coeffs = np.zeros((4, 1))
            (success, rotation_vector,
             translation_vector) = cv2.solvePnP(model_points,
                                                image_points,
                                                camera_matrix,
                                                dist_coeffs,
                                                flags=cv2.SOLVEPNP_ITERATIVE)

            if self.Warnings_List.topLevelItemCount() <= count:
                item = QTreeWidgetItem(["Thí sinh #" + str(count + 1)])
                item.addChildren([QTreeWidgetItem(), QTreeWidgetItem()])
                self.Warnings_List.addTopLevelItem(item)
                self.Warnings_List.topLevelItem(count).setExpanded(True)

            rotation_mat, _ = cv2.Rodrigues(rotation_vector)
            pose_mat = cv2.hconcat((rotation_mat, translation_vector))
            _, _, _, _, _, _, euler_angles = cv2.decomposeProjectionMatrix(
                pose_mat)
            angle = round(abs(euler_angles[1][0]), 2)
            self.Warnings_List.topLevelItem(count).child(0).setText(
                0, "Head turned " + str(angle) +
                " degrees horizontally relative to the camera")
            color = int(angle / self.settings.head_angle_limit * 255)
            if color > 255:
                if self.prev_warning_code_1 != 3:
                    self.warnings.append(
                        QDateTime.currentDateTime().toString() + " - " +
                        "Candidate #" + str(count + 1) +
                        " - Head turned " + str(angle) +
                        " degrees horizontally relative to the camera")
                    self.prev_warning_code_1 = 3
                color = 255
            self.Warnings_List.topLevelItem(count).child(0).setBackground(
                0, QColor(color, 255 - color, 0))

            angle = round(abs(euler_angles[2][0]), 2)
            self.Warnings_List.topLevelItem(count).child(1).setText(
                0, "Head turned " + str(angle) +
                " degrees vertically relative to the camera")
            color = int(angle / self.settings.head_angle_limit * 255)
            if color > 255:
                if self.prev_warning_code_2 != 5:
                    self.warnings.append(
                        QDateTime.currentDateTime().toString() + " - " +
                        "Candidate #" + str(count + 1) +
                        " - Head turned " + str(angle) +
                        " degrees vertically relative to the camera")
                    self.prev_warning_code_2 = 5
                color = 255
            self.Warnings_List.topLevelItem(count).child(1).setBackground(
                0, QColor(color, 255 - color, 0))

            if self.settings.print_results:
                (nose_end_point2D,
                 jacobian) = cv2.projectPoints(np.array([
                     (0.0, 0.0, 500.0)
                 ]), rotation_vector, translation_vector, camera_matrix,
                                               dist_coeffs)
                '''
                for p in image_points:
                    cv2.circle(image, (int(p[0]), int(p[1])), 2, (0, 0, 255), -1)
                '''

                cv2.line(image,
                         (int(image_points[0][0]), int(image_points[0][1])),
                         (int(nose_end_point2D[0][0][0]),
                          int(nose_end_point2D[0][0][1])), (255, 0, 0), 2)
            count += 1

        while self.Warnings_List.topLevelItemCount() > len(
                faces.multi_face_landmarks):
            self.Warnings_List.takeTopLevelItem(len(
                faces.multi_face_landmarks))

        if not hands.multi_hand_landmarks:
            for i in range(len(faces.multi_face_landmarks)):
                if self.Warnings_List.topLevelItem(i).childCount() == 2:
                    self.Warnings_List.topLevelItem(i).addChild(
                        QTreeWidgetItem())
                self.Warnings_List.topLevelItem(i).child(2).setText(
                    0, "No hands detected!")
                self.Warnings_List.topLevelItem(i).child(2).setBackground(
                    0, QColor("red"))
                if self.prev_warning_code != 0:
                    self.warnings.append(
                        QDateTime.currentDateTime().toString() + " - " +
                        "Candidate #" + str(i + 1) + " - No hands detected!")
                    self.prev_warning_code = 0
            return image

        face_list = {}
        for hand_landmarks in hands.multi_hand_landmarks:
            if self.settings.print_results:
                draw_landmarks(image, hand_landmarks,
                               mp.solutions.hands.HAND_CONNECTIONS)

            hand_x = hand_landmarks.landmark[0].x
            hand_y = hand_landmarks.landmark[0].y

            best_face = -1
            best_face_value = 1

            count = 0
            for face_landmarks in faces.multi_face_landmarks:
                if count in face_list and face_list[count] == 2:
                    count += 1
                    continue
                face_x = face_landmarks.landmark[0].x
                face_y = face_landmarks.landmark[0].y
                dist = math.sqrt(
                    pow(abs(face_x - hand_x), 2) +
                    pow(abs(face_y - hand_y), 2))
                if dist < best_face_value:
                    best_face_value = dist
                    best_face = count
                count += 1
            if best_face != -1:
                if best_face in face_list:
                    face_list[best_face] = 2
                else:
                    face_list[best_face] = 1

        for i in range(len(faces.multi_face_landmarks)):
            if self.Warnings_List.topLevelItem(i).childCount() == 2:
                self.Warnings_List.topLevelItem(i).addChild(QTreeWidgetItem())
            if i in face_list:
                if face_list[i] == 1:
                    self.Warnings_List.topLevelItem(i).child(2).setText(
                        0, "Only one hand detected!")
                    self.Warnings_List.topLevelItem(i).child(2).setBackground(
                        0, QColor("yellow"))
                    if self.prev_warning_code != 1:
                        self.warnings.append(
                            QDateTime.currentDateTime().toString() + " - " +
                            "Candidate #" + str(i + 1) +
                            " - Only one hand detected!")
                        self.prev_warning_code = 1
                else:
                    if self.Warnings_List.topLevelItem(i).childCount() == 3:
                        self.Warnings_List.topLevelItem(i).takeChild(2)
            else:
                self.Warnings_List.topLevelItem(i).child(2).setText(
                    0, "No hands detected!")
                self.Warnings_List.topLevelItem(i).child(2).setBackground(
                    0, QColor("red"))
                if self.prev_warning_code != 0:
                    self.warnings.append(
                        QDateTime.currentDateTime().toString() + " - " +
                        "Candidate #" + str(i + 1) +
                        " - No hands detected!")
                    self.prev_warning_code = 0
        """
        blob = cv2.dnn.blobFromImage(raw_img, 0.00392, (416, 416), (0, 0, 0), True, crop=False)
        self.net.setInput(blob)
        outs = self.net.forward(get_output_layers(self.net))

        class_ids = []
        confidences = []
        boxes = []

        for out in outs:
            for detection in out:
                scores = detection[5:]
                class_id = np.argmax(scores)
                confidence = scores[class_id]
                if confidence > 0.5:
                    center_x = int(detection[0] * width)
                    center_y = int(detection[1] * height)
                    w = int(detection[2] * width)
                    h = int(detection[3] * height)
                    x = center_x - w / 2
                    y = center_y - h / 2
                    class_ids.append(class_id)
                    confidences.append(float(confidence))
                    boxes.append([x, y, w, h])

        indices = cv2.dnn.NMSBoxes(boxes, confidences, 0.5, 0.4)

        for i in indices:
            i = i[0]
            box = boxes[i]
            x = box[0]
            y = box[1]
            w = box[2]
            h = box[3]

            color = self.colors[class_ids[i]]
            cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
            cv2.putText(image, str(self.classes[class_ids[i]]), (x - 10, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, color,
                        2)
        """
        return image

    def update_frame(self):
        """
        Update the frame shown in the application
        """
        ret, self.image = self.capture.read()
        self.display_image(self.image, 1)

    def display_image(self, image, windowed=1):
        """
        Display the processed image in the application
        :param image: frame from the camera
        :param windowed: number of the window currently shown
        """
        image = cv2.resize(image, (640, 480))
        image = cv2.flip(image, 1)
        try:
            image = self.face_rec(image)
        except Exception as e:
            print(e)
        qformat = QImage.Format_Indexed8
        if len(image.shape) == 3:
            if image.shape[2] == 4:
                qformat = QImage.Format_RGBA8888
            else:
                qformat = QImage.Format_RGB888
        out_image = QImage(image, image.shape[1], image.shape[0],
                           image.strides[0], qformat)
        out_image = out_image.rgbSwapped()

        if windowed == 1:
            self.imgLabel.setPixmap(QPixmap.fromImage(out_image))
            self.imgLabel.setScaledContents(True)
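Two names used in `face_rec` are defined elsewhere in the source; hedged sketches follow. `normalize_to_pixel_coordinates` mirrors MediaPipe's internal helper, and `model_points` is a generic 3D head model commonly paired with cv2.solvePnP; the coordinate values below are illustrative, not the author's:

def normalize_to_pixel_coordinates(x, y, width, height):
    # Assumed helper: map normalized landmark coordinates to pixel
    # positions, returning None for landmarks outside the frame.
    if not (0.0 <= x <= 1.0 and 0.0 <= y <= 1.0):
        return None
    return (min(int(x * width), width - 1),
            min(int(y * height), height - 1))

# Assumed 3D reference points for the six landmarks used above
# (nose tip, chin, eye corners, mouth corners), in model units.
model_points = np.array([
    (0.0, 0.0, 0.0),           # nose tip
    (0.0, -330.0, -65.0),      # chin
    (-225.0, 170.0, -135.0),   # left eye outer corner
    (225.0, 170.0, -135.0),    # right eye outer corner
    (-150.0, -150.0, -125.0),  # mouth left corner
    (150.0, -150.0, -125.0),   # mouth right corner
], dtype="double")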
Example no. 16
from tensorflow.keras import Model
import os
from utils import display_activation
from model_wrapper import ModelWrapper
from data_generator import test_generator


def reshape_data(a):
    x, _, _ = a
    x = x.reshape(-1, 28, 84, 1)
    return x


def data_flatten(generator, batch_size=32):
    return map(reshape_data, generator(batch_size=batch_size))


X = next(data_flatten(test_generator, 512))

for model_dir in os.listdir('classification_model_results'):
    try:
        model = ModelWrapper.load_model(
            'classification_model_results/{}'.format(model_dir))
        layer_outputs = [layer.output for layer in model.layers]
        activation_model = Model(inputs=model.input, outputs=layer_outputs)
        activations = activation_model.predict(X, verbose=1)
        display_activation(activations,
                           'classification_model_results/{}'.format(model_dir))
    except Exception as error:
        print(model_dir, error)
Example no. 17
def main():
    # Grab the dataset from scikit-learn
    data = datasets.load_iris()
    X = data['data']
    y = data['target']
    target_names = data['target_names']
    feature_names = [
        f.replace(' (cm)', '').replace(' ', '_') for f in data.feature_names
    ]
    X_train, X_test, y_train, y_test = train_test_split(X,
                                                        y,
                                                        test_size=0.3,
                                                        random_state=42)
    # Build and train the model
    model = RandomForestClassifier(random_state=101)
    model.fit(X_train, y_train)
    print("Score on the training set is: {:2}".format(
        model.score(X_train, y_train)))
    print("Score on the test set is: {:.2}".format(model.score(X_test,
                                                               y_test)))

    # CHANGES HERE >>>>>>>>>>
    X_mean = X_train.mean(axis=0).round(1)
    feature_defaults = dict(zip(feature_names, X_mean.tolist()))
    wrapped = ModelWrapper(model_name='iris-rf',
                           model_version=MODEL_VERSION,
                           model_object=model,
                           class_labels=target_names.tolist(),
                           feature_defaults=feature_defaults)

    # Save the model
    model_filename = 'iris-rf-v{}.pkl'.format(MODEL_VERSION)
    print("Saving model to {}...".format(model_filename))
    joblib.dump(wrapped, model_filename)

    # ***** Generate test data *****
    print('Generating test data...')
    all_probs = model.predict_proba(X_test)
    all_test_cases = prep_test_cases(X_test, all_probs, feature_names,
                                     target_names)
    test_data_fname = 'testdata_iris_v{}.json'.format(MODEL_VERSION)
    with open(test_data_fname, 'w') as fout:
        json.dump(all_test_cases, fout)

    # ***** Generate test data (Missing) *****
    print('Generating test data with missing values...')
    missing_grps = [(0, ), (1, ), (2, ), (0, 1), (0, 2), (1, 2), (0, 1, 2)]
    X_mean = X_train.mean(axis=0).round(1)
    all_features = []
    all_probs = []
    for missing_cols in missing_grps:
        X_missing = X_test.copy().astype('object')
        X_scored = X_test.copy()
        for col in missing_cols:
            X_missing[:, col] = None
            X_scored[:, col] = X_mean[col]
        # Use the imputed one to find expected probabilities
        all_probs.extend(model.predict_proba(X_scored))
        all_features.extend(X_missing)

    all_test_cases_missing = prep_test_cases(all_features, all_probs,
                                             feature_names, target_names)

    test_data_fname = 'testdata_iris_missing_v{}.json'.format(MODEL_VERSION)
    with open(test_data_fname, 'w') as fout:
        json.dump(all_test_cases_missing, fout)
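`prep_test_cases` is not shown; a sketch of one plausible implementation, inferred from how it is called above (the JSON field names are assumptions):

def prep_test_cases(features, probs, feature_names, target_names):
    # Assumed helper: pair each feature vector with the class
    # probabilities the scoring service is expected to return.
    cases = []
    for row, p in zip(features, probs):
        cases.append({
            'input': dict(zip(feature_names, list(row))),
            'expected': {name: float(v)
                         for name, v in zip(target_names, p)},
        })
    return cases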
Example no. 18
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time    : 17/11/14 PM2:21
# @Author  : shaoguang.csg
# @File    : main.py

from parse_conf import DataConf, ModelConf
from model_wrapper import ModelWrapper
from utils.logger import logger
from time import time

start = time()

data_conf = DataConf()
model_conf = ModelConf()

model = ModelWrapper(data_conf=data_conf, model_conf=model_conf)
model.train()
logger.info(model.get_weight())
model.evaluate()
result = model.predict()

end = time()

logger.info('time: {}'.format(end - start))

# 2 cores, 1 thread: 116
# 1 core: 228
Example no. 19
model_list = os.listdir(model_dir)
print('\nFound models:')

for num, model in enumerate(model_list):
    print(num, model)

model_name = model_list[int(input('\nChoose a model (enter a number): '))]
print('Load Model {}'.format(model_name))

model_path = os.path.join(model_dir, model_name)

learning_rate = float(model_name.split('-')[2])
img_size = int(model_name.split('-')[3])

model = ModelWrapper(learning_rate, img_size).model
model.load(os.path.join(model_path, model_name))
print('Model loaded!')

test_set_processed_data_path = os.path.join(processed_data_dir,
                                            'test_{}.npy'.format(img_size))
if os.path.exists(test_set_processed_data_path):
    test_set_data = np.load(test_set_processed_data_path)
    print('Data loaded!')
else:
    test_set_data = process_data.process_test_set_data(
        img_size, test_set_raw_data_dir, test_set_processed_data_path)
    print('Data processed!')

fig = plt.figure()
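For reference, the `model_name.split('-')` indexing above implies a model naming convention along these lines (the example name is hypothetical):

# Hypothetical model name: 'catsdogs-6conv-0.001-50'
#   split('-') -> ['catsdogs', '6conv', '0.001', '50']
#   learning_rate = float(parts[2]) = 0.001, img_size = int(parts[3]) = 50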
Example no. 20
class AbstractAgent:

    def __init__(self, environment: Env,
                 memory: AbstractMemory,
                 policy: AbstractPolicy,
                 model: K.Model,
                 optimizer: K.optimizers.Optimizer,
                 logger: Logger):
        self.environment = environment
        self.memory = memory
        self.policy = policy
        self.model = ModelWrapper(model, optimizer)
        self.logger = logger

        self.history = []

        logger.create_settings_model_file(model)
        logger.create_settings_agent_file(self)

    def _explore_env(self, batch_size: int, number_of_game: int = 10) -> Tuple[float, List[Sample]]:
        """ Return tuple of mean gain from all games and list of samples. """
        data = []
        gains = []
        state = self.environment.reset()
        previous_sample = None

        current_gain = 0
        n_sample = 0
        n_game = 0

        while n_game <= number_of_game or n_sample <= batch_size:
            q_values = self.model.predict(state)
            action = self.policy(q_values)

            next_state, reward, done, _ = self.environment.step(action)

            current_gain += reward

            current_sample = Sample(state, action, next_state, reward)
            if previous_sample is not None:
                previous_sample.next_sample = current_sample
                data.append(previous_sample)
            previous_sample = current_sample

            if done:
                data.append(current_sample)
                gains.append(current_gain)
                current_gain = 0
                previous_sample = None
                state = self.environment.reset()
                n_game += 1
            else:
                state = next_state
            n_sample += 1

        self.environment.close()
        return np.mean(gains), rand.sample(data, batch_size)

    def learn(self, epochs: int,
              batch_size_in_step: int,
              min_n_game_in_exploration: int,
              batch_size_in_exploration: int,
              change_model_delay: int):
        raise NotImplementedError

    def __str__(self):
        raise NotImplementedError
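`Sample` is used throughout the agent examples but never defined. A sketch consistent with its usage here and in Examples no. 11 and no. 22 (`next_sample`, `is_done`); the exact field types are assumptions:

from dataclasses import dataclass
from typing import Optional

import numpy as np


@dataclass
class Sample:
    # Field order matches the positional calls in _explore_env.
    state: np.ndarray
    action: int
    next_state: Optional[np.ndarray]
    reward: float
    next_sample: Optional['Sample'] = None

    def is_done(self) -> bool:
        # Example no. 11 marks terminal transitions with next_state=None.
        return self.next_state is None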
Example no. 21
    shots = args.shots
    token_limit = args.token_limit
    model_type = args.model
    model_size = args.model_size
    data_size = args.data_size
    lr_base = args.lr
    eval_data_size = args.eval_data_size
    eval_batch_size = args.eval_batch_size
    train_batch_size = args.train_batch_size
    local_rank = args.local_rank
    fp16 = args.fp16

    device, n_gpu = setup_device(local_rank)
    if model_type == "bert":
        lm = ModelWrapper('bert',
                          f'bert-{model_size}-uncased',
                          token_limit=token_limit,
                          device=device)
    elif model_type == "roberta":
        lm = ModelWrapper('roberta',
                          f'roberta-{model_size}',
                          token_limit=token_limit,
                          device=device)
    elif model_type == "longformer":
        lm = ModelWrapper('longformer',
                          f'allenai/longformer-{model_size}-4096',
                          token_limit=token_limit,
                          device=device)
    else:
        raise KeyError(f"model type {model_type} not supported")
    if do_mlm:
        classes = [['yes', 'right'], ['maybe'], ['wrong', 'no']]
Example no. 22
class DQN(AbstractAgent):
    def __init__(self,
                 environment: Env,
                 memory: AbstractMemory,
                 policy: AbstractPolicy,
                 model: K.Model,
                 logger: Logger,
                 gamma: float,
                 optimizer: K.optimizers.Optimizer,
                 n_step: int = 1):

        self.model = ModelWrapper(model, optimizer)
        #self.model.compile()
        self.current_model = None

        self.gamma = gamma
        self.n_step = n_step

        super(DQN, self).__init__(environment=environment,
                                  memory=memory,
                                  policy=policy,
                                  model=model,
                                  optimizer=optimizer,
                                  logger=logger)

    def _bellman_equation(self, batch: List[Sample]) -> np.ndarray:
        state = np.array([sample.state for sample in batch])
        q_values = self.current_model.predict(state)

        for idx in range(q_values.shape[0]):
            q_values[idx][batch[idx].action] = batch[idx].reward
            if not batch[idx].is_done():
                best_action_for_q_next = np.argmax(
                    self.model.predict(batch[idx].next_state))
                q_next = self.model.predict(
                    batch[idx].next_state)[0][best_action_for_q_next]
                q_values[idx][batch[idx].action] += self.gamma * q_next

        return q_values

    def learn(self, epochs: int, batch_size_in_step: int,
              min_n_game_in_exploration: int, batch_size_in_exploration: int,
              change_model_delay: int):

        self.model.compile()
        self.current_model = self.model.clone()
        self.current_model.compile()

        eval_score, starting_experience = self._explore_env(self.memory.maxlen)
        self.memory.add(starting_experience)

        for epoch in tqdm(range(epochs), desc='Learning in progress: '):

            if epoch % change_model_delay == 0:
                self.model = self.current_model.clone()
                self.model.compile()
                if type(self.memory) == PrioritizedExperienceReplay:
                    self.memory.update_model(self.model)
                eval_score, batch = self._explore_env(
                    batch_size_in_exploration, min_n_game_in_exploration)
                self.memory.add(batch)
            batch = self.memory.sample(batch_size_in_step)

            q_values = self._bellman_equation(batch)
            state = np.array([sample.state for sample in batch])
            loss = self.current_model.fit(state, q_values)
            self.policy.update()
            self.logger.add_event({
                'loss_value': loss,
                'mean_gain': eval_score,
                'epoch': epoch
            })

    def __str__(self):
        return "Agent: " + self.__class__.__name__ + "\n\n" + \
               "Discount value: " + str(self.gamma) + "\n"\
                "N-step: " + str(self.n_step) + "\n\n"\
                "Environment:\n" + str(self.environment) + "\n\n" + \
                "Memory:\n" + str(self.memory) + "\n" + \
                "Policy:\n" + str(self.policy)
Example no. 23
     shuffle=True)
 validation_dataset = DataLoader(
     CellInstanceSegmentation(path=os.path.join(args.path_to_data, "val"),
                              augmentation_p=0.0, two_classes=not args.three_classes),
     collate_fn=collate_function_cell_instance_segmentation, batch_size=1, num_workers=1, shuffle=False)
 test_dataset = DataLoader(
     CellInstanceSegmentation(path=os.path.join(args.path_to_data, "test"),
                              augmentation_p=0.0, two_classes=not args.three_classes),
     collate_fn=collate_function_cell_instance_segmentation, batch_size=1, num_workers=1, shuffle=False)
 # Model wrapper
 model_wrapper = ModelWrapper(detr=detr,
                              detr_optimizer=detr_optimizer,
                              detr_segmentation_optimizer=detr_segmentation_optimizer,
                              training_dataset=training_dataset,
                              validation_dataset=validation_dataset,
                              test_dataset=test_dataset,
                              loss_function=InstanceSegmentationLoss(
                                  segmentation_loss=SegmentationLoss(),
                                  ohem=args.ohem,
                                  ohem_faction=args.ohem_fraction),
                              device=device)
 # Perform training
 if args.train:
     model_wrapper.train(epochs=args.epochs,
                         optimize_only_segmentation_head_after_epoch=args.only_train_segmentation_head_after_epoch)
 # Perform validation
 if args.val:
     model_wrapper.validate(number_of_plots=30)
 # Perform testing
 if args.test:
     model_wrapper.test()
Example no. 24
#

#
flag_load_data = True
# data
if flag_load_data:
    dataset.load_preprocessed_data()
else:
    dataset.pretrained_emb_file = None
    dataset.emb_dim = 200
    dataset.max_seq_len = 200
    dataset.prepare_preprocessed_data(load_vocab=False)
#
data_train, data_test = dataset.split_train_and_test()
#

#
config = ModelSettings()
config.vocab = dataset.vocab
config.model_tag = model_tag
config.model_graph = build_graph
config.is_train = True
config.check_settings()

#
model = ModelWrapper(config)
model.prepare_for_train_and_valid()
#
model.train_and_valid(data_train, data_test)
#
Example no. 25
if not os.path.exists(processed_data_dir):
    os.makedirs(processed_data_dir)

train_set_processed_data_path = os.path.join(processed_data_dir,
                                             'train_{}.npy'.format(img_size))
if os.path.exists(train_set_processed_data_path):
    train_set_data = np.load(train_set_processed_data_path)
    print('Data loaded!')
else:
    train_set_data = process_data.process_train_set_data(
        img_size, train_set_raw_data_dir, train_set_processed_data_path)
    print('Data processed!')

model_wrapper = ModelWrapper(learning_rate,
                             img_size,
                             tensorboard_dir=tensorboard_dir)
model = model_wrapper.model

model_path = os.path.join(model_dir, model_wrapper.name)
if os.path.exists(model_path):
    model.load(os.path.join(model_path, model_wrapper.name))
    print('Model loaded!')

train_set_data, validation_set_data = train_set_data[:-500], train_set_data[
    -500:]

train_x = np.array([i[0]
                    for i in train_set_data]).reshape(-1, img_size, img_size,
                                                      1)
train_y = [i[1] for i in train_set_data]
Example no. 26
#
# data
dataset = Dataset()
dataset.load_vocab_tokens_and_emb()
#

#
config = ModelSettings()
config.vocab = dataset.vocab
config.model_tag = model_tag
config.model_graph = build_graph
config.is_train = False
config.check_settings()
#
model = ModelWrapper(config)
model.prepare_for_prediction()
#

text_raw = ["这本书不错"]
"""
work_book = xlrd.open_workbook(file_raw)
data_sheet = work_book.sheets()[0]
text_raw = data_sheet.col_values(0)
"""

#
preds_list = []
logits_list = []
#
for item in text_raw:
Example no. 27
def generate(args):
    # Load word2idx
    adaptive = ModelWrapper(args, data_loader=get_loader)
    if adaptive.generate(args):
        return 0