def __init__(self, image_shape, hidden_channels, K, L, actnorm_scale,
             flow_permutation, flow_coupling, LU_decomposed, y_classes,
             learn_top, y_condition):
    super().__init__()
    self.flow = FlowNet(image_shape=image_shape,
                        hidden_channels=hidden_channels,
                        K=K,
                        L=L,
                        actnorm_scale=actnorm_scale,
                        flow_permutation=flow_permutation,
                        flow_coupling=flow_coupling,
                        LU_decomposed=LU_decomposed)
    self.y_classes = y_classes
    self.y_condition = y_condition
    self.learn_top = learn_top

    # learned prior: a zero-initialised conv refines the top-level prior parameters
    if learn_top:
        C = self.flow.output_shapes[-1][1]
        self.learn_top_fn = Conv2dZeros(C * 2, C * 2)

    # class conditioning: project labels into the prior and latents into class logits
    if y_condition:
        C = self.flow.output_shapes[-1][1]
        self.project_ycond = LinearZeros(y_classes, 2 * C)
        self.project_class = LinearZeros(C, y_classes)

    # base prior parameters (mean and log-scale stacked along the channel axis)
    self.register_buffer(
        "prior_h",
        torch.zeros([1,
                     self.flow.output_shapes[-1][1] * 2,
                     self.flow.output_shapes[-1][2],
                     self.flow.output_shapes[-1][3]]))
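# A minimal sketch (not part of the constructor above) of how such a learned prior is
# typically consumed in Glow-style models: `prior_h` is broadcast to the batch size,
# optionally refined by `learn_top_fn`, shifted by the projected one-hot condition, and
# split channel-wise into mean and log-scale. The `split_feature` helper and the exact
# signature are assumptions for illustration, not taken from this section.
def prior(self, x, y_onehot=None):
    h = self.prior_h.repeat(x.shape[0], 1, 1, 1)     # (B, 2C, H', W') zero-initialised prior
    if self.learn_top:
        h = self.learn_top_fn(h)                     # learned refinement of the prior parameters
    if self.y_condition:
        assert y_onehot is not None
        yp = self.project_ycond(y_onehot)            # (B, 2C) class-conditional shift
        h = h + yp.view(x.shape[0], -1, 1, 1)
    return split_feature(h, "split")                 # -> (mean, logs), each with C channels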
def __init__(self, image_shape, hidden_channels, K, L, actnorm_scale,
             flow_permutation, flow_coupling, LU_decomposed, y_classes,
             learn_top, y_condition, extra_condition, sp_condition,
             d_condition, yd_condition):
    super().__init__()
    self.flow = FlowNet(image_shape=image_shape,
                        hidden_channels=hidden_channels,
                        K=K,
                        L=L,
                        actnorm_scale=actnorm_scale,
                        flow_permutation=flow_permutation,
                        flow_coupling=flow_coupling,
                        LU_decomposed=LU_decomposed,
                        extra_condition=extra_condition,
                        sp_condition=sp_condition,
                        num_classes=y_classes)
    self.y_classes = y_classes

    # any of the label/domain conditioning flags enables the conditional prior
    self.y_condition = bool(y_condition or d_condition or yd_condition)
    print("conditional version", self.y_condition)

    self.learn_top = learn_top
    print("extra condition", extra_condition)
    print("split prior condition", sp_condition)

    # learned prior
    if learn_top:
        C = self.flow.output_shapes[-1][1]
        self.learn_top_fn = Conv2dZeros(C * 2, C * 2)

    if self.y_condition:
        C = self.flow.output_shapes[-1][1]
        print("prior", 2 * C)
        self.project_ycond = LinearZeros(y_classes, 2 * C)
        self.project_class = LinearZeros(C, y_classes)

    self.register_buffer(
        "prior_h",
        torch.zeros([1,
                     self.flow.output_shapes[-1][1] * 2,
                     self.flow.output_shapes[-1][2],
                     self.flow.output_shapes[-1][3]]))
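# Hypothetical illustration (assumed, not shown in this section) of how `project_class`
# is commonly used for an auxiliary classification loss: the top-level latent z is
# average-pooled over its spatial dimensions and projected to class logits.
def classify(self, z):
    pooled = z.mean(dim=(2, 3))           # (B, C) global average pool over H', W'
    return self.project_class(pooled)     # (B, y_classes) logits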
def __init__(self, image_shape, hidden_channels, K, L, actnorm_scale,
             flow_permutation, flow_coupling, LU_decomposed, flow_embed_dim,
             y_classes, learn_top, y_condition):
    super().__init__()
    self.flow = FlowNet(image_shape=image_shape,
                        hidden_channels=hidden_channels,
                        K=K,
                        L=L,
                        actnorm_scale=actnorm_scale,
                        flow_permutation=flow_permutation,
                        flow_coupling=flow_coupling,
                        LU_decomposed=LU_decomposed,
                        flow_embed_dim=flow_embed_dim)
    self.y_classes = y_classes
    self.y_condition = y_condition
    self.learn_top = learn_top

    # learned prior
    if learn_top:
        C = self.flow.output_shapes[-1][1]
        self.learn_top_fn = Conv2dZeros(C * 2, C * 2)

    if y_condition:
        C = self.flow.output_shapes[-1][1]
        self.project_ycond = LinearZeros(y_classes, 2 * C)
        self.project_class = LinearZeros(C, y_classes)

    self.register_buffer(
        "prior_h",
        torch.zeros([1,
                     self.flow.output_shapes[-1][1] * 2,
                     self.flow.output_shapes[-1][2],
                     self.flow.output_shapes[-1][3]]))

    self.num_param = sum(p.numel() for p in self.parameters() if p.requires_grad)
    print("num_param: {}".format(self.num_param))
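# Hypothetical instantiation of the embedding-conditioned variant above. The class name
# `Glow` and all concrete hyperparameter values are illustrative assumptions, not taken
# from this section.
model = Glow(
    image_shape=(32, 32, 3),         # (H, W, C) of the input images
    hidden_channels=512,
    K=32,                            # flow steps per level
    L=3,                             # number of multi-scale levels
    actnorm_scale=1.0,
    flow_permutation="invconv",
    flow_coupling="affine",
    LU_decomposed=True,
    flow_embed_dim=64,               # assumed width of the conditioning embedding
    y_classes=10,
    learn_top=True,
    y_condition=False,
)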