Example #1
    def finetune(self, model, summary):
        """Finetune the given model on a "dataset" produced from chunks of the given summary.
        Args:
            model (BertForMaskedLM): a BERT for masked language modeling torch model
            summary (str): the summary to finetune on
        """
        model.train()

        all_inputs = self.prepare_finetuning_data(summary)
        input_batches = batch_data(all_inputs, self.finetune_batch_size)

        no_decay = ["bias", "LayerNorm.weight"]
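        # Parameters whose names match these patterns (biases, LayerNorm weights) get no weight decay,
        # mirroring the standard BERT fine-tuning setup.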
        optimizer_grouped_parameters = [
            {
                "params": [
                    p for n, p in model.named_parameters()
                    if not any(nd in n for nd in no_decay)
                ],
                "weight_decay": 1e-2,
            },
            {
                "params": [
                    p for n, p in model.named_parameters()
                    if any(nd in n for nd in no_decay)
                ],
                "weight_decay": 0.0,
            },
        ]
        optimizer = AdamW(optimizer_grouped_parameters,
                          lr=self.learning_rate,
                          eps=1e-8)
        scheduler = get_linear_schedule_with_warmup(
            optimizer,
            num_warmup_steps=self.warmup_steps,
            num_training_steps=len(input_batches) * self.finetune_epochs,
        )

        for epoch in range(self.finetune_epochs):
            for input_batch in input_batches:
                input_ids, attention_mask, token_type_ids, labels = get_input_tensors(
                    input_batch,
                    device=self.device,
                    tokenizer=self.model_tokenizer,
                )

                model.zero_grad()
                optimizer.zero_grad()
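                # Legacy transformers interface: the model takes masked_lm_labels and returns a tuple;
                # newer versions take labels and return an output object with a .loss attribute.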
                loss, _ = model(
                    input_ids=input_ids,
                    attention_mask=attention_mask,
                    token_type_ids=token_type_ids,
                    masked_lm_labels=labels,
                )
                loss.backward()
                optimizer.step()
                scheduler.step()

        model.eval()
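
The snippet depends on a batch_data() helper that is not shown. A minimal sketch of what it plausibly does, inferred from the call site rather than taken from the source: split the prepared inputs into consecutive batches of a fixed size.

def batch_data(items, batch_size):
    # Split a flat list into consecutive batches of at most batch_size items each.
    return [items[i:i + batch_size] for i in range(0, len(items), batch_size)]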
Example #2
    def run_inference_batch(self, model, batch):
        """Run an inference batch through the provided model
        Args:
            model (BertForMaskedLM): a BERT for masked language modeling torch model
            batch (List[BertInput]): the input batch to run through the model
        Returns:
            all_predictions (List[Dict[int, str]]): predicted tokens for every masked token in
                the inputs
        """
        input_ids, attention_mask, token_type_ids, _ = get_input_tensors(
            batch,
            device=self.device,
            tokenizer=self.model_tokenizer,
        )

        with torch.no_grad():
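            # Older transformers versions return a tuple; its first element holds the per-token vocabulary logits.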
            (model_output_batch, ) = model(
                input_ids=input_ids,
                attention_mask=attention_mask,
                token_type_ids=token_type_ids,
            )

        all_predictions = []
        for model_input, model_output in zip(batch, model_output_batch):
            predictions = {}
            for idx in model_input.masked_idxs:
                # argmax() yields a 0-dim tensor; take .item() so the tokenizer receives a plain int id
                predicted_id = model_output[idx].argmax().item()
                (predicted_token,
                 ) = self.model_tokenizer.convert_ids_to_tokens([predicted_id])
                predictions[idx] = predicted_token
            all_predictions.append(predictions)

        return all_predictions
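
For comparison, here is the same decoding step with a stock Hugging Face checkpoint outside the wrapper class. This is a self-contained sketch using the current transformers API, which returns an output object instead of the tuple unpacked above:

import torch
from transformers import BertForMaskedLM, BertTokenizer

tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
model = BertForMaskedLM.from_pretrained("bert-base-uncased")
model.eval()

encoded = tokenizer("Paris is the [MASK] of France.", return_tensors="pt")
masked_idx = (encoded["input_ids"][0] == tokenizer.mask_token_id).nonzero().item()

with torch.no_grad():
    logits = model(**encoded).logits

predicted_id = logits[0, masked_idx].argmax().item()
print(tokenizer.convert_ids_to_tokens([predicted_id]))  # e.g. ['capital']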
Example #3
        def get_input_color_and_depth_data(self):
            color_img, depth_img = self.get_camera_data()
            color_img = get_prepared_img(color_img, 'rgb')
            depth_img = get_prepared_img(depth_img, 'depth')
            color_heightmap, depth_heightmap = get_heightmap(
                color_img, depth_img, robot.cam_intrinsics, robot.cam_pose,
                robot.workspace_limits, robot.heightmap_resolution)
            valid_depth_heightmap = depth_heightmap.copy()
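            # Replace NaNs (missing depth readings) with zeros so the downstream tensors stay finite.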
            valid_depth_heightmap[np.isnan(valid_depth_heightmap)] = 0
            input_color_data, input_depth_data = get_input_tensors(color_heightmap, valid_depth_heightmap)

            return input_color_data, input_depth_data
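
The get_input_tensors() used here takes heightmaps rather than BERT inputs, so it is a different helper sharing the same name. A plausible sketch, assuming it only converts the HWC color and HW depth numpy heightmaps into NCHW float tensors (the original preprocessing is not shown and may do more, e.g. normalization or padding):

import numpy as np
import torch

def get_input_tensors(color_heightmap, valid_depth_heightmap):
    # Scale color to [0, 1], tile depth to three channels, then reshape HWC -> NCHW.
    color = color_heightmap.astype(np.float32) / 255.0
    depth = np.repeat(valid_depth_heightmap[..., np.newaxis], 3, axis=2).astype(np.float32)
    to_nchw = lambda a: torch.from_numpy(a).permute(2, 0, 1).unsqueeze(0)
    return to_nchw(color), to_nchw(depth)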
Example #4
    def __init__(
            self,
            img,
            model_path='./bodypix_resnet50_float_model-stride16/model.json',
            output_stride=16):
        print("[INFO] Loading model...")
        self.graph = load_graph_model(model_path)
        print("[INFO] Loaded model...")
        self.output_stride = output_stride

        self.img = img

        # Get input and output tensors
        self.input_tensor_names = get_input_tensors(self.graph)
        print(self.input_tensor_names)
        self.output_tensor_names = get_output_tensors(self.graph)
        print(self.output_tensor_names)
        self.input_tensor = self.graph.get_tensor_by_name(
            self.input_tensor_names[0])

targetWidth = (int(imgWidth) // OutputStride) * OutputStride + 1
targetHeight = (int(imgHeight) // OutputStride) * OutputStride + 1

print(imgHeight, imgWidth, targetHeight, targetWidth)
img = img.resize((targetWidth, targetHeight))
x = tf.keras.preprocessing.image.img_to_array(img, dtype=np.float32)
InputImageShape = x.shape
print("Input Image Shape in hwc", InputImageShape)

widthResolution = int((InputImageShape[1] - 1) / OutputStride) + 1
heightResolution = int((InputImageShape[0] - 1) / OutputStride) + 1
print('Resolution', widthResolution, heightResolution)

# Get input and output tensors
input_tensor_names = get_input_tensors(graph)
print(input_tensor_names)
output_tensor_names = get_output_tensors(graph)
print(output_tensor_names)
input_tensor = graph.get_tensor_by_name(input_tensor_names[0])

# Preprocessing Image
# For Resnet
if any('resnet_v1' in name for name in output_tensor_names):
    # add imagenet mean - extracted from body-pix source
    m = np.array([-123.15, -115.90, -103.06])
    x = np.add(x, m)
# For Mobilenet
elif any('MobilenetV1' in name for name in output_tensor_names):
    x = (x / 127.5) - 1
else:
    # The excerpt is truncated here; fail loudly for graphs that are neither ResNet nor MobileNet (assumed fallback).
    raise ValueError('Unknown model architecture')
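
A hedged sketch of the step that would typically follow this preprocessing (it is not shown in the excerpt and reuses the np, tf, graph, x, input_tensor and output_tensor_names names already in scope): add a batch dimension and run the frozen graph in a TensorFlow v1 session to fetch the named output tensors.

sample_image = np.expand_dims(x, axis=0)  # HWC -> NHWC

with tf.compat.v1.Session(graph=graph) as sess:
    results = sess.run(output_tensor_names, feed_dict={input_tensor: sample_image})
print([r.shape for r in results])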