def _initialize(self, interactions):
    """Set up model state from the *interactions* dataset.

    Caches the dataset dimensions and held-out test sequences on the
    instance, then builds the ``Model3`` network on the configured
    device together with an Adam optimizer over its parameters.

    Args:
        interactions: dataset object exposing ``num_items``,
            ``num_users`` and ``test_sequences`` attributes.
    """
    # Dataset dimensions — presumably drive embedding sizes inside the
    # network; confirm against Model3's constructor.
    self._num_items = interactions.num_items
    self._num_users = interactions.num_users

    # Held-out sequences kept around for later evaluation.
    self.test_sequence = interactions.test_sequences

    # NOTE(review): Model3 and self.model_args are defined elsewhere
    # in this file/project.
    self._net = Model3(
        self._num_users,
        self._num_items,
        self.model_args,
    ).to(self._device)

    self._optimizer = optim.Adam(
        self._net.parameters(),
        lr=self._learning_rate,
        weight_decay=self._l2,
    )
tf.cast(tf.equal(tf.argmax(predictions1, 1), tf.argmax(model1.Y, 1)), tf.float32)) # Make model 2 model2 = Model2(X2, Y2, keep_prob2) logits2, predictions2 = model2.build() loss_op2 = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=logits2, labels=model2.Y2)) train_op2 = tf.train.AdamOptimizer( learning_rate=model2.learning_rate).minimize(loss_op2) accuracy2 = tf.reduce_mean( tf.cast(tf.equal(tf.argmax(predictions2, 1), tf.argmax(model2.Y2, 1)), tf.float32)) # Make model 3 model3 = Model3(X3, Y3, keep_prob3) logits3, predictions3 = model3.build() loss_op3 = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=logits3, labels=model3.Y3)) train_op3 = tf.train.AdamOptimizer( learning_rate=model3.learning_rate).minimize(loss_op3) accuracy3 = tf.reduce_mean( tf.cast(tf.equal(tf.argmax(predictions3, 1), tf.argmax(model3.Y3, 1)), tf.float32)) # # Make model 4 model4 = Model4(logitse1, logitse2, Y4) logits4, predictions4 = model4.build() loss_op4 = tf.reduce_mean( tf.nn.softmax_cross_entropy_with_logits(logits=logits4, labels=model4.Y4)) train_op4 = tf.train.AdamOptimizer(