def _graph_dict():
    """Build a small CNTK graph with no real meaning other than providing
    something to traverse in graph-inspection tests.

    Returns:
        dict: maps short keys to the created nodes; ``d['root']`` is the
        final node of the graph. Two nodes deliberately share the name
        ``'op3'`` and one parameter is deliberately unnamed, so tests can
        exercise duplicate-name and missing-name handling.
    """
    d = {}

    # Leaf nodes: a sequence input over a named dynamic axis, a constant,
    # and a named parameter.
    d['i1'] = sequence.input(shape=(2, 3), sequence_axis=Axis('ia'), name='i1')
    d['c1'] = constant(shape=(2, 3), value=6, name='c1')
    d['p1'] = parameter(shape=(3, 2), init=7, name='p1')

    # Simple arithmetic ops to give the graph some depth.
    d['op1'] = plus(d['i1'], d['c1'], name='op1')
    d['op2'] = times(d['op1'], d['p1'], name='op2')

    # no name
    d['p2'] = parameter(shape=(2, 2))

    # duplicate names
    d['op3a'] = plus(d['op2'], d['p2'], name='op3')
    d['op3b'] = plus(d['op3a'], d['p2'], name='op3')

    d['first'] = sequence.first(d['op3b'], name='past')

    d['root'] = d['first']

    return d
unittest_helper(input_op, forward_input, expected_forward, expected_backward, device_id=device_id, precision=precision) RESHAPE_SUBSHAPE_TEST_CASES = [ #(input_shape, replacement_shape, begin_axis, end_axis, expected_output_shape) ((2, 3), (3, 2), 0, Axis.new_leading_axis(), (3, 2)), ((2, 3), (1), 0, 0, (1, 2, 3)), ((2, 3), (1, 1), Axis.new_leading_axis(), Axis.new_leading_axis(), (2, 3, 1, 1)), ((2, 3, 5), (C.InferredDimension), 0, Axis(2), (6, 5)), ((2, 3, 5), (C.InferredDimension), Axis(-3), -1, (6, 5)), ((6, 5), (2, C.InferredDimension), 0, 1, (2, 3, 5)), ] @pytest.mark.parametrize( "input_shape, replacement_shape, begin_axis, end_axis, expected_output_shape", RESHAPE_SUBSHAPE_TEST_CASES) def test_op_reshape_subshape(input_shape, replacement_shape, begin_axis, end_axis, expected_output_shape, device_id, precision): # Reshaping is just moving the input values to different indexes of the result tensor. # If we compute the gradients on the unmodified tensor, reshape would get 1 for all inputs # For testing the gradients we want to have different gradients for each input index otherwise we can't # test if they get wrongly permuted during test. To this end we multiply