def create_lenet5_model():
  with sg.Graph(name="lenet5_smv", backend="SMV") as graph:
    # Tensors and kernels are initialized in NHWC layout; fully connected
    # weights use NC.
    input_tensor = sg.Tensor(
        data_layout=sg.NHWC,
        tensor_data=generate_random_data((1, 28, 28, 1)))
    conv0_tensor = sg.Tensor(
        data_layout=sg.NHWC,
        tensor_data=generate_random_data((32, 3, 3, 1)))
    conv1_tensor = sg.Tensor(
        data_layout=sg.NHWC,
        tensor_data=generate_random_data((32, 3, 3, 32)))
    fc0_tensor = sg.Tensor(
        data_layout=sg.NC,
        tensor_data=generate_random_data((128, 4608)))
    fc1_tensor = sg.Tensor(
        data_layout=sg.NC,
        tensor_data=generate_random_data((10, 128)))

    act = sg.input_data(input_tensor)
    act = sg.nn.convolution(
        act, conv0_tensor, stride=[1, 1], padding="valid", activation="relu")
    act = sg.nn.convolution(
        act, conv1_tensor, stride=[1, 1], padding="valid", activation="relu")
    act = sg.nn.max_pool(act, pool_size=[2, 2], stride=[2, 2])
    act = sg.nn.mat_mul(act, fc0_tensor, activation="relu")
    act = sg.nn.mat_mul(act, fc1_tensor)
  return graph
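# The builders in this listing assume the following scaffolding: the SMAUG
# Python package imported as `sg`, NumPy, and a small `generate_random_data`
# helper that fills tensors with random values. The helper below is a
# hypothetical sketch, not the original implementation; the scaling factor and
# the float16 default (appropriate for the SMV backend, whereas the Reference
# backend models would typically pass np.float32) are assumptions.
import numpy as np
import smaug as sg

def generate_random_data(shape, dtype=np.float16):
  # Deterministic pseudo-random weights so repeated runs build identical graphs.
  r = np.random.RandomState(1234)
  return (r.rand(*shape) * 0.005).astype(dtype)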
def create_resnet50():
  with sg.Graph(name="resnet_smv", backend="SMV") as graph:
    # Tensors and kernels are initialized in NHWC layout; batch norm parameters
    # and fully connected weights use NC.
    input_tensor = sg.Tensor(
        data_layout=sg.NHWC,
        tensor_data=generate_random_data((1, 225, 225, 3)))
    conv0_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((64, 7, 7, 3)))
    bn0_mean_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 64)))
    bn0_var_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 64)))
    bn0_gamma_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 64)))
    bn0_beta_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 64)))
    fc_tensor = sg.Tensor(
        data_layout=sg.NC,
        tensor_data=generate_random_data((10, 7 * 7 * 2048)))

    x = sg.input_data(input_tensor, name="input")
    x = sg.nn.convolution(
        x, conv0_tensor, stride=[2, 2], padding="same", name="conv0")
    x = sg.nn.batch_norm(
        x, bn0_mean_tensor, bn0_var_tensor, bn0_gamma_tensor, bn0_beta_tensor,
        activation="relu", name="bn0")
    x = sg.nn.max_pool(x, pool_size=[3, 3], stride=[2, 2], name="pool")

    # Four ResNet stages of conv/identity blocks.
    x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
    x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')

    x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
    x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')

    x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
    x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')

    x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
    x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')

    x = sg.nn.mat_mul(x, fc_tensor, name="fc")
  return graph
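# create_resnet50 calls conv_block and identity_block helpers that are not
# shown in this listing. The function below is a hypothetical sketch of
# identity_block written against the same sg API, with the batch norms omitted
# for brevity; it is not the original implementation. It relies on the fact
# that an identity block's input channel count equals filters[2], so all weight
# shapes can be derived from the `filters` list alone. conv_block differs
# mainly in adding a strided 1x1 projection convolution on the shortcut path so
# both branches have matching shapes before the add.
def identity_block(x, kernel_size, filters, stage, block):
  filters1, filters2, filters3 = filters
  prefix = "res%d%s_branch" % (stage, block)
  # NHWC filter layout: (output channels, height, width, input channels).
  w1 = sg.Tensor(
      data_layout=sg.NHWC,
      tensor_data=generate_random_data((filters1, 1, 1, filters3)))
  w2 = sg.Tensor(
      data_layout=sg.NHWC,
      tensor_data=generate_random_data(
          (filters2, kernel_size, kernel_size, filters1)))
  w3 = sg.Tensor(
      data_layout=sg.NHWC,
      tensor_data=generate_random_data((filters3, 1, 1, filters2)))
  shortcut = x
  x = sg.nn.convolution(
      x, w1, stride=[1, 1], padding="same", activation="relu",
      name=prefix + "2a")
  x = sg.nn.convolution(
      x, w2, stride=[1, 1], padding="same", activation="relu",
      name=prefix + "2b")
  x = sg.nn.convolution(
      x, w3, stride=[1, 1], padding="same", name=prefix + "2c")
  # Residual connection: add the main branch back onto the unchanged input.
  return sg.math.add(x, shortcut)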
def create_sequential_model():
  with sg.Graph(name="sequential_graph", backend="Reference") as graph:
    # Tensors and weights are initialized in NCHW layout.
    input_tensor = sg.Tensor(
        tensor_data=np.random.rand(1, 3, 32, 32).astype(np.float32))
    filter_tensor0 = sg.Tensor(
        tensor_data=np.random.rand(64, 3, 3, 3).astype(np.float32))
    filter_tensor1 = sg.Tensor(
        tensor_data=np.random.rand(64, 64, 3, 3).astype(np.float32))
    weight_tensor0 = sg.Tensor(
        data_layout=sg.NC,
        tensor_data=np.random.rand(256, 16384).astype(np.float32))
    weight_tensor1 = sg.Tensor(
        data_layout=sg.NC,
        tensor_data=np.random.rand(10, 256).astype(np.float32))
    bn_mean_tensor = sg.Tensor(
        data_layout=sg.NC,
        tensor_data=np.random.rand(1, 64).astype(np.float32))
    bn_var_tensor = sg.Tensor(
        data_layout=sg.NC,
        tensor_data=np.random.rand(1, 64).astype(np.float32))
    bn_gamma_tensor = sg.Tensor(
        data_layout=sg.NC,
        tensor_data=np.random.rand(1, 64).astype(np.float32))
    bn_beta_tensor = sg.Tensor(
        data_layout=sg.NC,
        tensor_data=np.random.rand(1, 64).astype(np.float32))

    out = sg.input_data(input_tensor)
    out = sg.nn.convolution(
        out, filter_tensor0, stride=[1, 1], padding="same", activation="relu")
    out = sg.nn.batch_norm(
        out, bn_mean_tensor, bn_var_tensor, bn_gamma_tensor, bn_beta_tensor)
    out = sg.nn.convolution(
        out, filter_tensor1, stride=[1, 1], padding="same", activation="relu")
    out = sg.nn.max_pool(out, pool_size=[2, 2], stride=[2, 2])
    out = sg.tensor.flatten(out)
    out = sg.nn.mat_mul(out, weight_tensor0, activation="relu")
    out = sg.nn.mat_mul(out, weight_tensor1)
  return graph
def create_residual_model():
  with sg.Graph(name="residual_graph", backend="SMV") as graph:
    # Tensors and kernels are initialized in NCHW layout.
    input_tensor = sg.Tensor(
        tensor_data=np.random.rand(1, 1, 28, 28).astype(np.float16))
    filter_tensor0 = sg.Tensor(
        tensor_data=np.random.rand(64, 1, 3, 3).astype(np.float16))
    filter_tensor1 = sg.Tensor(
        tensor_data=np.random.rand(64, 1, 3, 3).astype(np.float16))
    filter_tensor2 = sg.Tensor(
        tensor_data=np.random.rand(64, 64, 3, 3).astype(np.float16))
    bn_mean_tensor = sg.Tensor(
        data_layout=sg.NC,
        tensor_data=np.random.rand(1, 64).astype(np.float16))
    bn_var_tensor = sg.Tensor(
        data_layout=sg.NC,
        tensor_data=np.random.rand(1, 64).astype(np.float16))
    bn_gamma_tensor = sg.Tensor(
        data_layout=sg.NC,
        tensor_data=np.random.rand(1, 64).astype(np.float16))
    bn_beta_tensor = sg.Tensor(
        data_layout=sg.NC,
        tensor_data=np.random.rand(1, 64).astype(np.float16))

    act = sg.input_data(input_tensor)
    # Shortcut branch.
    x = sg.nn.convolution(act, filter_tensor0, stride=[1, 1], padding="same")
    # Main branch.
    out = sg.nn.convolution(act, filter_tensor1, stride=[1, 1], padding="same")
    out = sg.nn.batch_norm(
        out, bn_mean_tensor, bn_var_tensor, bn_gamma_tensor, bn_beta_tensor,
        activation="relu")
    out = sg.nn.convolution(out, filter_tensor2, stride=[1, 1], padding="same")
    # Residual connection.
    out = sg.math.add(x, out)
  return graph
def create_minerva_model():
  with sg.Graph(name="minerva_smv", backend="SMV") as graph:
    # The input tensor uses NHWC layout; fully connected weights use NC.
    input_tensor = sg.Tensor(
        data_layout=sg.NHWC,
        tensor_data=generate_random_data((1, 28, 28, 1)))
    fc0_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((256, 784)))
    fc1_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((256, 256)))
    fc2_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((256, 256)))
    fc3_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((10, 256)))

    act = sg.input_data(input_tensor)
    act = sg.nn.mat_mul(act, fc0_tensor, activation="relu")
    act = sg.nn.mat_mul(act, fc1_tensor, activation="relu")
    act = sg.nn.mat_mul(act, fc2_tensor, activation="relu")
    act = sg.nn.mat_mul(act, fc3_tensor)
  return graph
def create_lstm_model():
  with sg.Graph(name="bidi_lstm_smv", backend="SMV") as graph:
    input_tensor = sg.Tensor(
        data_layout=sg.NTC, tensor_data=generate_random_data((1, 4, 32)))
    # Weight tensors are initialized in NC layout.
    # Weights of the forward LSTM.
    w_f = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((128, 32)))
    u_f = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((128, 32)))
    # Weights of the backward LSTM.
    w_b = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((128, 32)))
    u_b = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((128, 32)))

    # Inputs are specified in shape (batch, timestep, size).
    inputs = sg.input_data(input_tensor, name="input")
    bidi_lstm = sg.nn.BidirectionalLSTM([w_f, u_f], [w_b, u_b],
                                        name="bidi_lstm")
    outputs, state_fwd, state_bwd = bidi_lstm(inputs)
  return graph
def create_lstm_model():
  with sg.Graph(name="lstm_ref", backend="Reference") as graph:
    input_tensor = sg.Tensor(
        data_layout=sg.NTC, tensor_data=generate_random_data((1, 4, 32)))
    # Weight tensors are initialized in NC layout.
    # Layer 1 of the LSTM.
    w0 = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((128, 32)))
    u0 = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((128, 32)))
    # Layer 2 of the LSTM.
    w1 = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((128, 32)))
    u1 = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((128, 32)))

    # Inputs are specified in shape (batch, timestep, size).
    inputs = sg.input_data(input_tensor, name="input")
    lstm_layer0 = sg.nn.LSTM([w0, u0], name="lstm0")
    lstm_layer1 = sg.nn.LSTM([w1, u1], name="lstm1")
    outputs, state = lstm_layer0(inputs)
    outputs, state = lstm_layer1(outputs)
  return graph
def create_vgg_model():
  with sg.Graph(name="vgg_ref", backend="Reference") as graph:
    input_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((1, 32, 32, 3)))
    conv0_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((64, 3, 3, 3)))
    conv1_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((128, 3, 3, 64)))
    conv2_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((128, 3, 3, 128)))
    conv3_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((128, 3, 3, 128)))
    conv4_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((256, 3, 3, 128)))
    conv5_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((256, 3, 3, 256)))
    conv6_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((256, 3, 3, 256)))
    conv7_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((512, 3, 3, 256)))
    conv8_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((512, 3, 3, 512)))
    conv9_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((512, 3, 3, 512)))
    fc0_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((512, 2048)))
    fc1_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((10, 512)))

    act = sg.input_data(input_tensor)
    act = sg.nn.convolution(
        act, conv0_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.convolution(
        act, conv1_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.max_pool(act, pool_size=[2, 2], stride=[2, 2])
    act = sg.nn.convolution(
        act, conv2_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.convolution(
        act, conv3_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.max_pool(act, pool_size=[2, 2], stride=[2, 2])
    act = sg.nn.convolution(
        act, conv4_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.convolution(
        act, conv5_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.convolution(
        act, conv6_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.max_pool(act, pool_size=[2, 2], stride=[2, 2])
    act = sg.nn.convolution(
        act, conv7_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.convolution(
        act, conv8_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.convolution(
        act, conv9_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.max_pool(act, pool_size=[2, 2], stride=[2, 2])
    act = sg.nn.mat_mul(act, fc0_tensor, activation="relu")
    act = sg.nn.mat_mul(act, fc1_tensor)
  return graph
def create_cnn_model():
  with sg.Graph(name="cnn_ref", backend="Reference",
                mem_policy=sg.AllDma) as graph:
    input_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((1, 32, 32, 3)))
    conv0_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((32, 3, 3, 3)))
    bn0_mean_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 32)))
    bn0_var_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 32)))
    bn0_gamma_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 32)))
    bn0_beta_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 32)))
    conv1_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((32, 3, 3, 32)))
    bn1_mean_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 32)))
    bn1_var_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 32)))
    bn1_gamma_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 32)))
    bn1_beta_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 32)))
    conv2_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((64, 3, 3, 32)))
    conv3_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((64, 3, 3, 64)))
    bn2_mean_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 64)))
    bn2_var_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 64)))
    bn2_gamma_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 64)))
    bn2_beta_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((1, 64)))
    fc0_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((512, 4096)))
    fc1_tensor = sg.Tensor(
        data_layout=sg.NC, tensor_data=generate_random_data((10, 512)))

    act = sg.input_data(input_tensor)
    act = sg.nn.convolution(
        act, conv0_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.batch_norm(
        act, bn0_mean_tensor, bn0_var_tensor, bn0_gamma_tensor, bn0_beta_tensor)
    act = sg.nn.convolution(
        act, conv1_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.max_pool(act, pool_size=[2, 2], stride=[2, 2])
    act = sg.nn.batch_norm(
        act, bn1_mean_tensor, bn1_var_tensor, bn1_gamma_tensor, bn1_beta_tensor)
    act = sg.nn.convolution(
        act, conv2_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.convolution(
        act, conv3_tensor, stride=[1, 1], padding="same", activation="relu")
    act = sg.nn.max_pool(act, pool_size=[2, 2], stride=[2, 2])
    act = sg.nn.batch_norm(
        act, bn2_mean_tensor, bn2_var_tensor, bn2_gamma_tensor, bn2_beta_tensor)
    act = sg.nn.mat_mul(act, fc0_tensor, activation="relu")
    act = sg.nn.mat_mul(act, fc1_tensor)
  return graph
def create_elu_model():
  with sg.Graph(name="large_elu_ref", backend="Reference",
                mem_policy=sg.AllDma) as graph:
    # Tensors and kernels are initialized in NHWC layout.
    input_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((1, 32, 32, 3)))
    conv0_stack0_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((384, 3, 3, 3)))
    conv0_stack1_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((384, 1, 1, 384)))
    conv1_stack1_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((384, 2, 2, 384)))
    conv2_stack1_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((640, 2, 2, 384)))
    conv3_stack1_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((640, 2, 2, 640)))
    conv0_stack2_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((640, 1, 1, 640)))
    conv1_stack2_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((768, 2, 2, 640)))
    conv2_stack2_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((768, 2, 2, 768)))
    conv3_stack2_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((768, 2, 2, 768)))
    conv0_stack3_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((768, 1, 1, 768)))
    conv1_stack3_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((896, 2, 2, 768)))
    conv2_stack3_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((896, 2, 2, 896)))
    conv0_stack4_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((896, 3, 3, 896)))
    conv1_stack4_tensor = sg.Tensor(
        data_layout=sg.NHWC,
        tensor_data=generate_random_data((1024, 2, 2, 896)))
    conv2_stack4_tensor = sg.Tensor(
        data_layout=sg.NHWC,
        tensor_data=generate_random_data((1024, 2, 2, 1024)))
    conv0_stack5_tensor = sg.Tensor(
        data_layout=sg.NHWC,
        tensor_data=generate_random_data((1024, 1, 1, 1024)))
    conv1_stack5_tensor = sg.Tensor(
        data_layout=sg.NHWC,
        tensor_data=generate_random_data((1152, 2, 2, 1024)))
    conv0_stack6_tensor = sg.Tensor(
        data_layout=sg.NHWC,
        tensor_data=generate_random_data((1152, 1, 1, 1152)))
    conv0_stack7_tensor = sg.Tensor(
        data_layout=sg.NHWC,
        tensor_data=generate_random_data((100, 1, 1, 1152)))

    act = sg.input_data(input_tensor, name="input")
    # Stack 0
    act = sg.nn.convolution(
        act, conv0_stack0_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack0")
    act = sg.nn.max_pool(
        act, pool_size=[2, 2], stride=[2, 2], name="pool_stack0")
    # Stack 1
    act = sg.nn.convolution(
        act, conv0_stack1_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack1")
    act = sg.nn.convolution(
        act, conv1_stack1_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv1_stack1")
    act = sg.nn.convolution(
        act, conv2_stack1_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv2_stack1")
    act = sg.nn.convolution(
        act, conv3_stack1_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv3_stack1")
    act = sg.nn.max_pool(
        act, pool_size=[2, 2], stride=[2, 2], name="pool_stack1")
    # Stack 2
    act = sg.nn.convolution(
        act, conv0_stack2_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack2")
    act = sg.nn.convolution(
        act, conv1_stack2_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv1_stack2")
    act = sg.nn.convolution(
        act, conv2_stack2_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv2_stack2")
    act = sg.nn.convolution(
        act, conv3_stack2_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv3_stack2")
    act = sg.nn.max_pool(
        act, pool_size=[2, 2], stride=[2, 2], name="pool_stack2")
    # Stack 3
    act = sg.nn.convolution(
        act, conv0_stack3_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack3")
    act = sg.nn.convolution(
        act, conv1_stack3_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv1_stack3")
    act = sg.nn.convolution(
        act, conv2_stack3_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv2_stack3")
    act = sg.nn.max_pool(
        act, pool_size=[2, 2], stride=[2, 2], name="pool_stack3")
    # Stack 4
    act = sg.nn.convolution(
        act, conv0_stack4_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack4")
    act = sg.nn.convolution(
        act, conv1_stack4_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv1_stack4")
    act = sg.nn.convolution(
        act, conv2_stack4_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv2_stack4")
    act = sg.nn.max_pool(
        act, pool_size=[2, 2], stride=[2, 2], name="pool_stack4")
    # Stack 5
    act = sg.nn.convolution(
        act, conv0_stack5_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack5")
    act = sg.nn.convolution(
        act, conv1_stack5_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv1_stack5")
    # Stack 6
    act = sg.nn.convolution(
        act, conv0_stack6_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack6")
    # Stack 7
    act = sg.nn.convolution(
        act, conv0_stack7_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack7")
  return graph
def create_elu_model():
  with sg.Graph(name="elu_ref", backend="Reference",
                mem_policy=sg.AllDma) as graph:
    # Tensors and kernels are initialized in NHWC layout.
    input_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((1, 32, 32, 3)))
    conv0_stack0_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((192, 5, 5, 3)))
    conv0_stack1_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((192, 1, 1, 192)))
    conv1_stack1_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((240, 3, 3, 192)))
    conv0_stack2_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((240, 1, 1, 240)))
    conv1_stack2_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((260, 2, 2, 240)))
    conv0_stack3_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((260, 1, 1, 260)))
    conv1_stack3_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((280, 2, 2, 260)))
    conv0_stack4_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((280, 1, 1, 280)))
    conv1_stack4_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((300, 2, 2, 280)))
    conv0_stack5_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((300, 1, 1, 300)))
    conv0_stack6_tensor = sg.Tensor(
        data_layout=sg.NHWC, tensor_data=generate_random_data((100, 1, 1, 300)))

    act = sg.input_data(input_tensor, name="input")
    act = sg.nn.convolution(
        act, conv0_stack0_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack0")
    act = sg.nn.max_pool(
        act, pool_size=[2, 2], stride=[2, 2], name="pool_stack0")
    act = sg.nn.convolution(
        act, conv0_stack1_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack1")
    act = sg.nn.convolution(
        act, conv1_stack1_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv1_stack1")
    act = sg.nn.max_pool(
        act, pool_size=[2, 2], stride=[2, 2], name="pool_stack1")
    act = sg.nn.convolution(
        act, conv0_stack2_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack2")
    act = sg.nn.convolution(
        act, conv1_stack2_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv1_stack2")
    act = sg.nn.max_pool(
        act, pool_size=[2, 2], stride=[2, 2], name="pool_stack2")
    act = sg.nn.convolution(
        act, conv0_stack3_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack3")
    act = sg.nn.convolution(
        act, conv1_stack3_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv1_stack3")
    act = sg.nn.max_pool(
        act, pool_size=[2, 2], stride=[2, 2], name="pool_stack3")
    act = sg.nn.convolution(
        act, conv0_stack4_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack4")
    act = sg.nn.convolution(
        act, conv1_stack4_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv1_stack4")
    act = sg.nn.max_pool(
        act, pool_size=[2, 2], stride=[2, 2], name="pool_stack4")
    act = sg.nn.convolution(
        act, conv0_stack5_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack5")
    act = sg.nn.convolution(
        act, conv0_stack6_tensor, stride=[1, 1], padding="same",
        activation="elu", name="conv0_stack6")
  return graph
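# A minimal driver sketch for any of the builders above, assuming the SMAUG
# Graph object exposes print_summary() and write_graph() as in the upstream
# examples; if those method names differ in your version of the API,
# substitute the appropriate calls.
if __name__ == "__main__":
  graph = create_lenet5_model()
  graph.print_summary()  # Dump a readable summary of the operators.
  graph.write_graph()    # Serialize the topology and weights for the simulator.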