def test_forward(self, batch_size: int, num_fields: int, embed_size: int):
    layer = ComposeExcitationNetworkLayer(num_fields=num_fields,
                                          reduction=1,
                                          activation=nn.ReLU6())
    layer = layer.to(device)
    print(f'Input Size: {layer.inputs_size};\nOutput Size: {layer.outputs_size}')

    # Generate inputs for the layer
    field_aware_embed_inp = torch.rand(batch_size, num_fields**2, embed_size)
    field_aware_embed_inp.names = ('B', 'N', 'E')
    inp_size = field_aware_embed_inp.size()
    summary(layer, input_size=[inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = layer.forward(field_aware_embed_inp)
    self.assertEqual(outputs.size(), (batch_size, num_fields**2, embed_size))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def summary(self):
    torchinfo.summary(self, (1, self.latent_dim))
    params = self.count_params()
    print(f"Loaded GAN with {params['generator']:,} generator params "
          f"and {params['discriminator']:,} discriminator params.")
def test_forward(self, batch_size: int, num_fields: int, embed_size: int):
    model = MultiGateMixtureOfExpertsModel(
        embed_size=embed_size,
        num_fields=num_fields,
        num_tasks=4,
        num_experts=4,
        expert_output_size=1,
        expert_layer_sizes=[16, 16, 16],
        deep_layer_sizes=[16, 16, 16],
        expert_dropout_p=[0.9, 0.9, 0.9],
        deep_dropout_p=[0.9, 0.9, 0.9],
        expert_activation=nn.ReLU6(),
        deep_activation=nn.ReLU6())
    model = model.to(device)

    # Generate inputs for the layer
    emb_inp = torch.rand(batch_size, num_fields, embed_size)
    emb_inp.names = ('B', 'N', 'E')
    emb_inp_size = emb_inp.size()
    summary(model, input_size=[emb_inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = model.forward(emb_inp)
    self.assertEqual(outputs.size(), (batch_size, 1))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_forward(self, batch_size: int, embed_size: int):
    model = NeuralCollaborativeFilteringModel(
        embed_size=embed_size,
        deep_output_size=8,
        deep_layer_sizes=[16, 16, 16],
        deep_dropout_p=[0.9, 0.9, 0.9],
        deep_activation=nn.ReLU6())
    model = model.to(device)

    # Generate inputs for the layer
    emb_inp = torch.rand(batch_size, 2, embed_size)
    emb_inp.names = ('B', 'N', 'E')
    emb_inp_size = emb_inp.size()
    summary(model, input_size=[emb_inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = model.forward(emb_inp)
    self.assertEqual(outputs.size(), (batch_size, 1))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_forward(self, batch_size: int, num_fields: int, embed_size: int):
    output_size = 1
    model = DeepAndCrossNetworkModel(
        inputs_size=embed_size,
        num_fields=num_fields,
        deep_output_size=4,
        deep_layer_sizes=[32, 16, 8],
        cross_num_layers=4,
        output_size=output_size,
        deep_dropout_p=[0.9, 0.9, 0.9],
        deep_activation=nn.ReLU6())
    model = model.to(device)

    # Generate inputs for the layer
    emb_inp = torch.rand(batch_size, num_fields, embed_size)
    emb_inp.names = ('B', 'N', 'E')
    emb_inp_size = emb_inp.size()
    summary(model, input_size=[emb_inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = model.forward(emb_inp)
    self.assertEqual(outputs.size(), (batch_size, output_size))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_forward(self, batch_size: int, num_fields: int, embed_size: int):
    output_size = 1
    model = LogisticRegressionModel(inputs_size=num_fields * embed_size,
                                    output_size=output_size)
    model = model.to(device)

    # Generate inputs for the layer
    emb_inp = torch.rand(batch_size, num_fields, embed_size)
    emb_inp.names = ('B', 'N', 'E')
    emb_inp_size = emb_inp.size()
    summary(model, input_size=[emb_inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = model.forward(emb_inp)
    self.assertEqual(outputs.size(), (batch_size, output_size))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_forward(self, batch_size: int, num_fields: int, embed_size: int):
    model = FieldAwareFactorizationMachineModel(num_fields=num_fields, dropout_p=0.9)
    model = model.to(device)

    # Generate inputs for the layer
    feat_inp = torch.rand(batch_size, num_fields, 1)
    feat_inp.names = ('B', 'N', 'E')
    feat_inp_size = feat_inp.size()

    field_emb_inp = torch.rand(batch_size, num_fields**2, embed_size)
    field_emb_inp.names = ('B', 'N', 'E')
    field_emb_inp_size = field_emb_inp.size()

    summary(model,
            input_size=[feat_inp_size, field_emb_inp_size],
            device=device,
            dtypes=[torch.float, torch.float])

    # Forward
    outputs = model.forward(feat_inp, field_emb_inp)
    self.assertEqual(outputs.size(), (batch_size, 1))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_forward(self, batch_size: int, num_fields: int, embed_size: int):
    output_size = 1
    model = FeatureImportanceAndBilinearFeatureInteractionNetwork(
        embed_size=embed_size,
        num_fields=num_fields,
        senet_reduction=4,
        deep_output_size=output_size,
        deep_layer_sizes=[16, 16, 16],
        bilinear_type='all',
        bilinear_bias=True,
        deep_dropout_p=[0.9, 0.9, 0.9],
        deep_activation=nn.ReLU6())
    model = model.to(device)

    # Generate inputs for the layer
    emb_inp = torch.rand(batch_size, num_fields, embed_size)
    emb_inp.names = ('B', 'N', 'E')
    emb_inp_size = emb_inp.size()
    summary(model, input_size=[emb_inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = model.forward(emb_inp)
    self.assertEqual(outputs.size(), (batch_size, output_size))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_forward(self, batch_size: int, num_fields: int, embed_size: int):
    model = EntireSpaceMultiTaskModel(num_fields=num_fields,
                                      layer_sizes=[16, 16, 16],
                                      dropout_p=[0.9, 0.9, 0.9],
                                      activation=nn.ReLU())
    model = model.to(device)

    # Generate inputs for the layer
    emb_inp = torch.rand(batch_size, num_fields, embed_size)
    emb_inp.names = ('B', 'N', 'E')
    emb_inp_size = emb_inp.size()
    summary(model, input_size=[emb_inp_size], device=device, dtypes=[torch.float])

    # Forward
    pcvr, pctr = model.forward(emb_inp)
    self.assertEqual(pcvr.size(), (batch_size, 1))
    self.assertEqual(pctr.size(), (batch_size, 1))
    print(f'pcvr Size: {pcvr.size()}\n'
          f'pctr Size: {pctr.size()}')
def test_forward(self, batch_size: int, num_fields: int, embed_size: int):
    model = ElaboratedEntireSpaceSupervisedMultiTaskModel(
        num_fields=num_fields,
        layer_sizes=[16, 16, 16],
        dropout_p=[0.9, 0.9, 0.9],
        activation=nn.ReLU())
    model = model.to(device)

    # Generate inputs for the layer
    emb_inp = torch.rand(batch_size, num_fields, embed_size)
    emb_inp.names = ('B', 'N', 'E')
    emb_inp_size = emb_inp.size()
    summary(model, input_size=[emb_inp_size], device=device, dtypes=[torch.float])

    # Forward
    prob_impress_to_click, prob_impress_to_d_action, prob_impress_to_buy = model.forward(emb_inp)
    self.assertEqual(prob_impress_to_click.size(), (batch_size, 1))
    self.assertEqual(prob_impress_to_d_action.size(), (batch_size, 1))
    self.assertEqual(prob_impress_to_buy.size(), (batch_size, 1))
    print(f'Prob impress to click Size: {prob_impress_to_click.size()},\n'
          f'Prob impress to d action Size: {prob_impress_to_d_action.size()},\n'
          f'Prob impress to buy Size: {prob_impress_to_buy.size()},\n')
def _main(): """ネットワーク構造の確認用""" model = CNN() batch_size = 1 summary(model, input_data=[batch_size, 720], col_names=["output_size", "num_params"])
def test_forward(self, batch_size: int, length: int, embed_size: int):
    model = PersonalizedReRankingModel(embed_size=embed_size,
                                       max_num_position=length,
                                       encoding_size=16,
                                       num_heads=4,
                                       num_layers=2,
                                       use_bias=True,
                                       dropout=0.9,
                                       fnn_dropout_p=0.9,
                                       fnn_activation=nn.ReLU())
    model = model.to(device)

    # Generate inputs for the layer
    feat_inp = torch.randint(0, length, (batch_size, length, embed_size))
    feat_inp.names = ('B', 'L', 'E')
    feat_inp_size = feat_inp.size()
    summary(model, input_size=[feat_inp_size], device=device, dtypes=[torch.int])

    # Forward
    outputs = model.forward(feat_inp)
    self.assertEqual(outputs.size(), (batch_size, length))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_forward(self, batch_size: int, num_fields: int, embed_size: int):
    model = DeepFieldAwareFactorizationMachineModel(
        embed_size=embed_size,
        num_fields=num_fields,
        deep_output_size=4,
        deep_layer_sizes=[32, 16, 8],
        ffm_dropout_p=0.9,
        deep_dropout_p=[0.9, 0.9, 0.9],
        deep_activation=nn.ReLU6())
    model = model.to(device)

    # Generate inputs for the layer
    field_emb_inp = torch.rand(batch_size, num_fields**2, embed_size)
    field_emb_inp.names = ('B', 'N', 'E')
    field_emb_inp_size = field_emb_inp.size()
    summary(model, input_size=[field_emb_inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = model.forward(field_emb_inp)
    self.assertEqual(outputs.size(), (batch_size, 1))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_single_input_all_cols(capsys: pytest.CaptureFixture[str]) -> None:
    model = SingleInputNet()
    col_names = (
        "kernel_size",
        "input_size",
        "output_size",
        "num_params",
        "mult_adds",
    )
    input_shape = (7, 1, 28, 28)
    summary(model, input_size=input_shape, depth=1, col_names=col_names, col_width=20)
    verify_output(capsys, "tests/test_output/single_input_all.out")

    summary(
        model,
        input_data=torch.randn(*input_shape),
        depth=1,
        col_names=col_names,
        col_width=20,
    )
    verify_output(capsys, "tests/test_output/single_input_all.out")
def test_forward(self, batch_size: int, embed_size: int):
    layer = StarSpaceLayer(similarity=functional.cosine_similarity)
    layer = layer.to(device)
    print(f'Input Size: {layer.inputs_size};\nOutput Size: {layer.outputs_size}')

    # Generate inputs for the layer
    inp = torch.rand(batch_size, 2, embed_size)
    inp.names = ('B', 'N', 'E')
    inp_size = inp.size()
    summary(layer, input_size=[inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = layer.forward(inp)
    self.assertEqual(outputs.size(), (batch_size, embed_size))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_forward(self, batch_size: int, num_fields: int, embed_size: int, output_size: int):
    layer = CompressInteractionNetworkLayer(embed_size=embed_size,
                                            num_fields=num_fields,
                                            output_size=output_size,
                                            layer_sizes=[32, 64, 32],
                                            is_direct=False,
                                            use_bias=True,
                                            use_batchnorm=True,
                                            activation=nn.ReLU6())
    layer = layer.to(device)
    print(f'Input Size: {layer.inputs_size};\nOutput Size: {layer.outputs_size}')

    # Generate inputs for the layer
    inp = torch.rand(batch_size, num_fields, embed_size)
    inp.names = ('B', 'N', 'E')
    inp_size = inp.size()
    summary(layer, input_size=[inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = layer.forward(inp)
    self.assertEqual(outputs.size(), (batch_size, output_size))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_exception_output(capsys: pytest.CaptureFixture[str]) -> None:
    input_size = (1, 1, 28, 28)
    summary(EdgeCaseModel(throw_error=False), input_size=input_size)
    with pytest.raises(RuntimeError):
        summary(EdgeCaseModel(throw_error=True), input_size=input_size)
    verify_output(capsys, "unit_test/test_output/exception.out")
def test_forward(self, batch_size: int, num_fields: int, embed_size: int):
    output_size = 16
    dropout_p = 0.9
    layer = WideLayer(inputs_size=embed_size,
                      output_size=output_size,
                      dropout_p=dropout_p)
    layer = layer.to(device)
    print(f'Input Size: {layer.inputs_size};\nOutput Size: {layer.outputs_size}')

    # Generate inputs for the layer
    inp = torch.rand(batch_size, num_fields, embed_size)
    inp.names = ('B', 'N', 'E')
    inp_size = inp.size()
    summary(layer, input_size=[inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = layer.forward(inp)
    self.assertEqual(outputs.size(), (batch_size, num_fields, output_size))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_forward(self, batch_size: int, embed_size: int):
    layer = GeneralizedMatrixFactorizationLayer()
    layer = layer.to(device)
    print(f'Input Size: {layer.inputs_size};\nOutput Size: {layer.outputs_size}')

    # Generate inputs for the layer
    inp = torch.rand(batch_size, 2, embed_size)
    inp.names = ('B', 'N', 'E')
    inp_size = inp.size()
    summary(layer, input_size=[inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = layer.forward(inp)
    self.assertEqual(outputs.size(), (batch_size, 1))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_forward(self, batch_size: int, num_fields: int, embed_size: int):
    layer = OuterProductNetworkLayer(embed_size=embed_size,
                                     num_fields=num_fields,
                                     kernel_type='mat')
    layer = layer.to(device)
    print(f'Input Size: {layer.inputs_size};\nOutput Size: {layer.outputs_size}')

    # Generate inputs for the layer
    inp = torch.rand(batch_size, num_fields, embed_size)
    inp.names = ('B', 'N', 'E')
    inp_size = inp.size()
    summary(layer, input_size=[inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = layer.forward(inp)
    self.assertEqual(outputs.size(), (batch_size, comb(num_fields, 2)))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_forward(self, batch_size: int, seq_length: int, embed_size: int):
    layer = PositionEmbeddingLayer(max_num_position=seq_length)
    layer = layer.to(device)
    print(f'Input Size: {layer.inputs_size};\nOutput Size: {layer.outputs_size}')

    # Generate inputs for the layer
    inp = torch.rand(batch_size, seq_length, embed_size)
    inp.names = ('B', 'L', 'E')
    inp_size = inp.size()
    summary(layer, input_size=[inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = layer.forward(inp)
    self.assertEqual(outputs.size(), (batch_size, seq_length, embed_size))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_forward(self, batch_size: int, num_fields: int, embed_size: int):
    layer = FieldAwareFactorizationMachineLayer(num_fields=num_fields, dropout_p=0.9)
    layer = layer.to(device)
    print(f'Input Size: {layer.inputs_size};\nOutput Size: {layer.outputs_size}')

    # Generate inputs for the layer
    inp = torch.rand(batch_size, num_fields**2, embed_size)
    inp.names = ('B', 'N', 'E')
    inp_size = inp.size()
    summary(layer, input_size=[inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = layer.forward(inp)
    self.assertEqual(outputs.size(), (batch_size, comb(num_fields, 2), embed_size))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_forward(self, batch_size: int, num_fields: int, embed_size: int, output_size: int):
    layer = DynamicRoutingLayer(embed_size=embed_size,
                                routed_size=output_size,
                                max_num_caps=16,
                                num_iter=2)
    layer = layer.to(device)
    print(f'Input Size: {layer.inputs_size};\nOutput Size: {layer.outputs_size}')

    # Generate inputs for the layer
    inp = torch.rand(batch_size, num_fields, embed_size)
    inp.names = ('B', 'N', 'E')
    inp_size = inp.size()
    summary(layer, input_size=[inp_size], device=device, dtypes=[torch.float])

    # Forward
    outputs = layer.forward(inp)
    self.assertEqual(outputs.size(), (batch_size, layer.num_caps, output_size))
    print(f'Output Size: {outputs.size()}, Output Dimensions: {outputs.names}')
def test_image_inputs(self):
    embedder = ImageInput(
        embed_size=128,
        in_channels=3,
        layers_size=[128, 128, 128],
        kernels_size=[3, 3, 3],
        strides=[1, 1, 1],
        paddings=[1, 1, 1],
        pooling='max_pooling',
        use_batchnorm=True,
        dropout_p=0.2,
        activation=nn.ReLU()
    )
    embedder = embedder.to(self.device)

    batch_size = 8
    num_channels = 3
    height = 256
    width = 256
    inp = torch.rand(batch_size, num_channels, height, width)
    inp = inp.to(self.device)
    inp_size = inp.size()
    print(f'Input Size: {inp_size}')

    torchinfo.summary(embedder, input_size=list(inp_size))

    out = embedder(inp)
    print(f'Output Size: {out.size()}')
def unet_dataloader():
    # Build a random dataset of (image, mask) pairs
    random_dataset = []
    for i in range(0, 100):
        random_dataset.append((
            torch.from_numpy(np.random.rand(3, 20, 20).astype("f")),
            torch.from_numpy(np.random.rand(1, 20, 20).astype("f")),
        ))
    random_dataset_loader = DataLoader(random_dataset, batch_size=10, shuffle=True)

    # Training
    model = UNet()
    print(model)
    summary(model)

    optimizer = torch.optim.Adam(model.parameters(), lr=0.1)
    loss_func = torch.nn.MSELoss()
    epochs = 5
    loss_list = []
    for e in range(epochs):
        training_loss = 0.0
        for img, label in random_dataset_loader:
            prediction = model(img)
            loss = loss_func(prediction, label)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            training_loss += loss.data.item()
        loss_list.append(training_loss)
        if e % 10 == 0:
            print("Epoch: {}, Training Loss: {:.2f}".format(e, training_loss))
def test_row_settings(capsys: pytest.CaptureFixture[str]) -> None:
    model = SingleInputNet()
    summary(model, input_size=(16, 1, 28, 28), row_settings=("var_names",))
    verify_output(capsys, "tests/test_output/row_settings.out")
def log_model(self, model: ITrainableModel, device: str):
    try:
        import torchinfo
        torchinfo.summary(model, input_data=model.example_inputs(), device=device)
    except ImportError:
        self.logger.info(model)
def model_details(model, x):
    print("Model summary:")
    summary(
        model,
        input_size=(config.BATCH_SIZE, 1, config.SIZE, config.SIZE),
        verbose=1,
    )
    print(f"Output size: {model(x).shape}")
def test_dict_out(capsys: pytest.CaptureFixture[str]) -> None:
    # TODO: expand this test to handle intermediate dict layers.
    model = MultipleInputNetDifferentDtypes()
    input_data = torch.randn(1, 300)
    other_input_data = torch.randn(1, 300).long()
    summary(model, input_data={"x1": input_data, "x2": other_input_data})
    verify_output(capsys, "tests/test_output/dict_input.out")
def plot(self):
    self.eval()
    # dummy = {}
    # for k, v in self.dummy.items():
    #     dummy[int(k)] = v
    # torchinfo.summary(self, input_data=self.dummy)
    if isinstance(self.dummy, dict):
        torchinfo.summary(self, input_size=None, **self.dummy)
    else:
        torchinfo.summary(self, input_data=self.dummy)