# Example 1
    def forward(self, is_train, req, in_data, out_data, aux):
        """Assign each ROI to an FPN level, ROI-pool per level, and restore
        the original ROI ordering in the output.

        in_data layout: the first num_strides entries are per-level feature
        maps; the last entry holds the ROIs, shape (n, 5) as
        (batch_idx, x1, y1, x2, y2) — inferred from the w/h computation below.
        """
        rois = in_data[-1].asnumpy()
        w = rois[:, 3] - rois[:, 1] + 1
        h = rois[:, 4] - rois[:, 2] + 1

        # Leonid fix, just in case w / h will misbehave: keep log2/sqrt finite.
        w = np.maximum(w, 1e-7)
        h = np.maximum(h, 1e-7)

        # FPN level assignment: floor(2 + log2(sqrt(area) / 224)), clipped
        # to the available strides.
        feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0,
                          len(self.feat_strides) - 1)
        pyramid_idx = []

        rois_p = [None for _ in range(self.num_strides)]
        for i in range(self.num_strides):
            self.feat_idx[i] = np.where(feat_id == i)[0]
            if len(self.feat_idx[i]) == 0:
                # No ROI on this level: pad one dummy so the pooling op still
                # runs; the -1 index sorts it out of the final take() below.
                rois_p[i] = np.zeros((1, 5))
                pyramid_idx.append(-1)
            else:
                rois_p[i] = rois[self.feat_idx[i]]
                pyramid_idx.append(self.feat_idx[i])
        # Inverse permutation restoring original ROI order; the slice drops
        # the dummy (-1) entries.
        rois_idx = np.argsort(np.hstack(pyramid_idx))[-rois.shape[0]:]

        if is_train:
            # Buffers that receive input gradients in backward().
            for i in range(self.num_strides):
                self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i]))

            autograd.mark_variables(
                [in_data[i] for i in range(self.num_strides)],
                self.in_grad_hist_list)
            with autograd.train_section():
                for i in range(self.num_strides):
                    self.roi_pool[i] = mx.nd.ROIPooling(
                        in_data[i],
                        mx.nd.array(rois_p[i], in_data[i].context), (7, 7),
                        spatial_scale=1.0 / self.feat_strides[i])
            # BUG FIX: the pooled levels were never merged, leaving roi_pool
            # undefined in the train path.  (A stray mx.sym.Convolution block
            # referencing an undefined `suffix` — symbol-API dead code — was
            # also removed.)
            roi_pool = mx.nd.concatenate(self.roi_pool, axis=0)
        else:
            roi_pool = [None for _ in range(self.num_strides)]

            for i in range(self.num_strides):
                roi_pool[i] = mx.nd.ROIPooling(
                    in_data[i],
                    mx.nd.array(rois_p[i], in_data[i].context), (7, 7),
                    spatial_scale=1.0 / self.feat_strides[i])
            # BUG FIX: merge into one NDArray; mx.nd.take cannot consume a
            # Python list, and a list has no .context attribute.
            roi_pool = mx.nd.concatenate(roi_pool, axis=0)

        # Gather back into the callers' ROI order and write the output.
        roi_pool = mx.nd.take(roi_pool, mx.nd.array(rois_idx,
                                                    roi_pool.context))
        self.assign(out_data[0], req[0], roi_pool)
# Example 2
    def forward(self, is_train, req, in_data, out_data, aux):
        """Split rotated ROIs across pyramid levels, pool each level with
        ROIAlignRotated, and emit the pooled features in original ROI order.
        """
        rois = in_data[-1].asnumpy()
        # Rotated ROIs carry width/height directly in columns 3 and 4; clamp
        # to 1 so the log2 below never sees a non-positive area.
        # TODO: carefully scale the w, h
        w = np.maximum(rois[:, 3], 1)
        h = np.maximum(rois[:, 4], 1)
        feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0,
                          len(self.feat_strides) - 1)

        level_rois = [None] * self.num_strides
        order = []
        for lvl in range(self.num_strides):
            picked = np.where(feat_id == lvl)[0]
            self.feat_idx[lvl] = picked
            if len(picked) == 0:
                # No ROI landed on this level: feed one all-zero dummy so the
                # pooling op still runs; -1 keeps it out of the final gather.
                level_rois[lvl] = np.zeros((1, 6))
                order.append(-1)
            else:
                level_rois[lvl] = rois[picked]
                order.append(picked)
        # Inverse permutation over the concatenated per-level results; the
        # slice discards the dummy (-1) entries.
        rois_idx = np.argsort(np.hstack(order))[-rois.shape[0]:]

        if is_train:
            for lvl in range(self.num_strides):
                self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[lvl]))

            autograd.mark_variables(
                [in_data[lvl] for lvl in range(self.num_strides)],
                self.in_grad_hist_list)
            with autograd.train_section():
                for lvl in range(self.num_strides):
                    self.roi_pool[lvl] = mx.nd.contrib.ROIAlignRotated(
                        in_data[lvl],
                        mx.nd.array(level_rois[lvl], in_data[lvl].context),
                        (7, 7),
                        spatial_scale=1.0 / self.feat_strides[lvl],
                        sample_ratio=4)
            roi_pool = mx.nd.concatenate(self.roi_pool, axis=0)
        else:
            # Inference path: nothing is recorded, which saves memory.
            pooled = []
            for lvl in range(self.num_strides):
                pooled.append(mx.nd.contrib.ROIAlignRotated(
                    in_data[lvl],
                    mx.nd.array(level_rois[lvl], in_data[lvl].context),
                    (7, 7),
                    spatial_scale=1.0 / self.feat_strides[lvl],
                    sample_ratio=4))
            roi_pool = mx.nd.concatenate(pooled, axis=0)

        # Gather back into the original ROI order and write the output.
        roi_pool = mx.nd.take(roi_pool,
                              mx.nd.array(rois_idx, roi_pool.context))
        self.assign(out_data[0], req[0], roi_pool)
# Example 3
    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        """Propagate gradients into the per-level feature maps.

        Gradients recorded by autograd during forward() are accumulated into
        self.in_grad_hist_list and copied out to in_grad here.
        """
        # Zero every input gradient first; levels with no ROIs stay zero.
        for i in range(len(in_grad)):
            self.assign(in_grad[i], req[i], 0)

        with autograd.train_section():
            for i in range(self.num_strides):
                # BUG FIX: was `len(self.feat_idx[i] > 0)` — len() of an
                # elementwise comparison array, only accidentally truthy.
                if len(self.feat_idx[i]) > 0:
                    # Head gradient: the slice of out_grad belonging to this
                    # level, multiplied into the recorded pooled output.
                    autograd.compute_gradient([mx.nd.take(out_grad[0], mx.nd.array(self.feat_idx[i], out_grad[0].context)) * self.roi_pool[i]])

        if self.with_deformable:
            # Deformable mode also tracked the offset FC weight/bias inputs
            # (two extra entries per stride).
            for i in range(0, self.num_strides * 3):
                self.assign(in_grad[i], req[i], self.in_grad_hist_list[i])
        else:
            for i in range(0, self.num_strides):
                self.assign(in_grad[i], req[i], self.in_grad_hist_list[i])

        # The recorded graph can hold large intermediates; free them promptly.
        gc.collect()
    def backward(self, req, out_grad, in_data, out_data, in_grad, aux):
        """Copy gradients recorded during forward() into in_grad.

        Replays autograd on the per-level pooled outputs, then drains the
        accumulated buffers from self.in_grad_hist_list.
        """
        # Reset all input gradients; untouched levels remain zero.
        for i in range(len(in_grad)):
            self.assign(in_grad[i], req[i], 0)

        with autograd.train_section():
            for i in range(self.num_strides):
                # BUG FIX: original read `len(self.feat_idx[i] > 0)`, taking
                # len() of a boolean array instead of comparing the length.
                if len(self.feat_idx[i]) > 0:
                    autograd.compute_gradient([mx.nd.take(out_grad[0], mx.nd.array(self.feat_idx[i], out_grad[0].context)) * self.roi_pool[i]])

        if self.with_deformable:
            # Offset FC weights/biases add two tracked inputs per stride.
            for i in range(0, self.num_strides * 3):
                self.assign(in_grad[i], req[i], self.in_grad_hist_list[i])
        else:
            for i in range(0, self.num_strides):
                self.assign(in_grad[i], req[i], self.in_grad_hist_list[i])

        # Encourage release of the recorded computation graph.
        gc.collect()
# Example 5
    def forward(self, is_train, req, in_data, out_data, aux):
        """Route ROIs to FPN levels and pool them, optionally with two-pass
        deformable PS-ROI pooling.

        in_data layout: the first num_strides entries are per-level feature
        maps; when self.with_deformable is set, the next 2*num_strides
        entries are FC weight/bias pairs used to predict ROI part offsets;
        the last entry holds the ROIs, (n, 5) as (batch_idx, x1, y1, x2, y2)
        — inferred from the w/h computation below.
        """
        rois = in_data[-1].asnumpy()
        w = rois[:, 3] - rois[:, 1] + 1
        h = rois[:, 4] - rois[:, 2] + 1

        #Leonid fix, just in case w / h will misbehave
        # (keeps log2/sqrt below finite for degenerate boxes)
        w = np.maximum(w, 1e-7)
        h = np.maximum(h, 1e-7)

        # FPN level assignment: floor(2 + log2(sqrt(area)/224)), clipped.
        feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0,
                          len(self.feat_strides) - 1)
        pyramid_idx = []

        rois_p = [None for _ in range(self.num_strides)]
        for i in range(self.num_strides):
            self.feat_idx[i] = np.where(feat_id == i)[0]
            if len(self.feat_idx[i]) == 0:
                # padding dummy roi — keeps the pooling op runnable; the -1
                # index sorts the dummy out of the final take() below.
                rois_p[i] = np.zeros((1, 5))
                pyramid_idx.append(-1)
            else:
                rois_p[i] = rois[self.feat_idx[i]]
                pyramid_idx.append(self.feat_idx[i])
        # Inverse permutation restoring the original ROI order; the slice
        # drops the dummy (-1) entries.
        rois_idx = np.argsort(np.hstack(pyramid_idx))[-rois.shape[0]:]

        if is_train:
            # Buffers that will receive input gradients in backward().
            for i in range(self.num_strides):
                self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i]))

            if self.with_deformable:
                # Also track gradients of the offset FC weights/biases.
                for i in range(self.num_strides, self.num_strides * 3):
                    self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i]))
                autograd.mark_variables(
                    [in_data[i] for i in range(self.num_strides * 3)],
                    self.in_grad_hist_list)

                with autograd.train_section():
                    for i in range(self.num_strides):
                        # Pass 1 (no_trans=True): pool features that feed the
                        # FC layer predicting per-part (x, y) offsets.
                        roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(
                            data=in_data[i],
                            rois=mx.nd.array(rois_p[i], in_data[i].context),
                            group_size=1,
                            pooled_size=7,
                            sample_per_part=4,
                            no_trans=True,
                            part_size=7,
                            output_dim=256,
                            spatial_scale=1.0 / self.feat_strides[i])
                        roi_offset = mx.nd.FullyConnected(
                            data=roi_offset_t,
                            num_hidden=7 * 7 * 2,
                            weight=in_data[i * 2 + self.num_strides],
                            bias=in_data[i * 2 + 1 + self.num_strides])
                        # (n_rois, 2, 7, 7): one (dx, dy) per 7x7 part.
                        roi_offset_reshape = mx.nd.reshape(data=roi_offset,
                                                           shape=(-1, 2, 7, 7))
                        # Pass 2: apply the predicted offsets (trans=...).
                        self.roi_pool[
                            i] = mx.contrib.nd.DeformablePSROIPooling(
                                data=in_data[i],
                                rois=mx.nd.array(rois_p[i],
                                                 in_data[i].context),
                                trans=roi_offset_reshape,
                                group_size=1,
                                pooled_size=7,
                                sample_per_part=4,
                                no_trans=False,
                                part_size=7,
                                output_dim=self.output_dim,
                                spatial_scale=1.0 / self.feat_strides[i],
                                trans_std=0.1)
            else:
                autograd.mark_variables(
                    [in_data[i] for i in range(self.num_strides)],
                    self.in_grad_hist_list)
                with autograd.train_section():
                    for i in range(self.num_strides):
                        self.roi_pool[i] = mx.nd.ROIPooling(
                            in_data[i],
                            mx.nd.array(rois_p[i], in_data[i].context), (7, 7),
                            spatial_scale=1.0 / self.feat_strides[i])
            roi_pool = mx.nd.concatenate(self.roi_pool, axis=0)
        else:
            # during testing, there is no need to record variable, thus saving memory
            roi_pool = [None for _ in range(self.num_strides)]
            if self.with_deformable:
                for i in range(self.num_strides):
                    # Same two-pass scheme as the training branch, without
                    # autograd recording.
                    roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(
                        data=in_data[i],
                        rois=mx.nd.array(rois_p[i], in_data[i].context),
                        group_size=1,
                        pooled_size=7,
                        sample_per_part=4,
                        no_trans=True,
                        part_size=7,
                        output_dim=256,
                        spatial_scale=1.0 / self.feat_strides[i])
                    roi_offset = mx.nd.FullyConnected(
                        data=roi_offset_t,
                        num_hidden=7 * 7 * 2,
                        weight=in_data[i * 2 + self.num_strides],
                        bias=in_data[i * 2 + 1 + self.num_strides])
                    roi_offset_reshape = mx.nd.reshape(data=roi_offset,
                                                       shape=(-1, 2, 7, 7))
                    roi_pool[i] = mx.contrib.nd.DeformablePSROIPooling(
                        data=in_data[i],
                        rois=mx.nd.array(rois_p[i], in_data[i].context),
                        trans=roi_offset_reshape,
                        group_size=1,
                        pooled_size=7,
                        sample_per_part=4,
                        no_trans=False,
                        part_size=7,
                        output_dim=self.output_dim,
                        spatial_scale=1.0 / self.feat_strides[i],
                        trans_std=0.1)
            else:
                for i in range(self.num_strides):
                    roi_pool[i] = mx.nd.ROIPooling(
                        in_data[i],
                        mx.nd.array(rois_p[i], in_data[i].context), (7, 7),
                        spatial_scale=1.0 / self.feat_strides[i])

            roi_pool = mx.nd.concatenate(roi_pool, axis=0)

        # Gather back into the original ROI order and write the output.
        roi_pool = mx.nd.take(roi_pool, mx.nd.array(rois_idx,
                                                    roi_pool.context))
        self.assign(out_data[0], req[0], roi_pool)
    def forward(self, is_train, req, in_data, out_data, aux):
        """Assign ROIs to FPN levels and pool them (deformable PS-ROI
        pooling when self.with_deformable, plain ROI pooling otherwise).

        Interface matches the other forward() variants in this file:
        in_data holds the per-level feature maps (plus, for deformable
        mode, the offset FC weight/bias pairs) and, last, the (n, 5) ROIs.
        """
        rois = in_data[-1].asnumpy()
        w = rois[:, 3] - rois[:, 1] + 1
        h = rois[:, 4] - rois[:, 2] + 1
        # ROBUSTNESS FIX: clamp as the sibling forward() variants do, so a
        # degenerate box cannot drive log2() below to -inf/NaN and corrupt
        # the level assignment.
        w = np.maximum(w, 1e-7)
        h = np.maximum(h, 1e-7)
        # FPN level assignment, clipped to the valid stride range.
        feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, len(self.feat_strides) - 1)
        pyramid_idx = []

        rois_p = [None for _ in range(self.num_strides)]
        for i in range(self.num_strides):
            self.feat_idx[i] = np.where(feat_id == i)[0]
            if len(self.feat_idx[i]) == 0:
                # padding dummy roi; -1 sorts it out of the final take().
                rois_p[i] = np.zeros((1, 5))
                pyramid_idx.append(-1)
            else:
                rois_p[i] = rois[self.feat_idx[i]]
                pyramid_idx.append(self.feat_idx[i])
        # Permutation restoring the original ROI order (dummies sliced off).
        rois_idx = np.argsort(np.hstack(pyramid_idx))[-rois.shape[0]:]

        if is_train:
            for i in range(self.num_strides):
                self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i]))

            if self.with_deformable:
                # Track gradients for the offset FC weights/biases too.
                for i in range(self.num_strides, self.num_strides * 3):
                    self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i]))
                autograd.mark_variables([in_data[i] for i in range(self.num_strides * 3)], self.in_grad_hist_list)

                with autograd.train_section():
                    for i in range(self.num_strides):
                        # Pass 1 pools features for the offset-predicting FC.
                        roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), group_size=1, pooled_size=7,
                                                                            sample_per_part=4, no_trans=True, part_size=7, output_dim=256, spatial_scale=1.0 / self.feat_strides[i])
                        roi_offset = mx.nd.FullyConnected(data=roi_offset_t, num_hidden=7 * 7 * 2, weight=in_data[i * 2 + self.num_strides], bias=in_data[i * 2 + 1 + self.num_strides])
                        # (n_rois, 2, 7, 7): one (dx, dy) per 7x7 part.
                        roi_offset_reshape = mx.nd.reshape(data=roi_offset, shape=(-1, 2, 7, 7))
                        # Pass 2 applies the predicted offsets.
                        self.roi_pool[i] = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), trans=roi_offset_reshape,
                                                                                group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7,
                                                                                output_dim=self.output_dim, spatial_scale=1.0 / self.feat_strides[i], trans_std=0.1)
            else:
                autograd.mark_variables([in_data[i] for i in range(self.num_strides)], self.in_grad_hist_list)
                with autograd.train_section():
                    for i in range(self.num_strides):
                        self.roi_pool[i] = mx.nd.ROIPooling(in_data[i], mx.nd.array(rois_p[i], in_data[i].context), (7, 7), spatial_scale=1.0 / self.feat_strides[i])
            roi_pool = mx.nd.concatenate(self.roi_pool, axis=0)
        else:
            # during testing, there is no need to record variable, thus saving memory
            roi_pool = [None for _ in range(self.num_strides)]
            if self.with_deformable:
                for i in range(self.num_strides):
                    roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), group_size=1, pooled_size=7,
                                                                        sample_per_part=4, no_trans=True, part_size=7, output_dim=256, spatial_scale=1.0 / self.feat_strides[i])
                    roi_offset = mx.nd.FullyConnected(data=roi_offset_t, num_hidden=7 * 7 * 2, weight=in_data[i * 2 + self.num_strides], bias=in_data[i * 2 + 1 + self.num_strides])
                    roi_offset_reshape = mx.nd.reshape(data=roi_offset, shape=(-1, 2, 7, 7))
                    roi_pool[i] = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), trans=roi_offset_reshape,
                                                                       group_size=1, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7,
                                                                       output_dim=self.output_dim, spatial_scale=1.0 / self.feat_strides[i], trans_std=0.1)
            else:
                for i in range(self.num_strides):
                    roi_pool[i] = mx.nd.ROIPooling(in_data[i], mx.nd.array(rois_p[i], in_data[i].context), (7, 7), spatial_scale=1.0 / self.feat_strides[i])

            roi_pool = mx.nd.concatenate(roi_pool, axis=0)

        # Gather back into the original ROI order and write the output.
        roi_pool = mx.nd.take(roi_pool, mx.nd.array(rois_idx, roi_pool.context))
        self.assign(out_data[0], req[0], roi_pool)
# Example 7
    def forward(self, is_train, req, in_data, out_data, aux):
        """Assign ROIs to pyramid levels and pool them with the operator
        selected by self.pooling_mode: 'deform' (deformable PS-ROI pooling
        with learned offsets), 'alignave' (PS-ROI align-average) or
        'pooling' (plain PS-ROI pooling).
        """
        rois = in_data[-1].asnumpy()
        w = rois[:, 3] - rois[:, 1] + 1
        h = rois[:, 4] - rois[:, 2] + 1
        # FPN level assignment, clipped to the valid stride range.
        feat_id = np.clip(np.floor(2 + np.log2(np.sqrt(w * h) / 224)), 0, len(self.feat_strides) - 1)
        pyramid_idx = []

        rois_p = [None for _ in range(self.num_strides)]
        for i in range(self.num_strides):
            self.feat_idx[i] = np.where(feat_id == i)[0]
            if len(self.feat_idx[i]) == 0:
                # padding dummy roi; -1 sorts it out of the final take().
                rois_p[i] = np.zeros((1, 5))
                pyramid_idx.append(-1)
            else:
                rois_p[i] = rois[self.feat_idx[i]]
                pyramid_idx.append(self.feat_idx[i])
        # Permutation restoring the original ROI order (dummies sliced off).
        rois_idx = np.argsort(np.hstack(pyramid_idx))[-rois.shape[0]:]
        if is_train:
            for i in range(self.num_strides):
                self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i]))

            if self.pooling_mode == 'deform':
                # Track gradients for the offset FC weights/biases too.
                for i in range(self.num_strides, self.num_strides * 3):
                    self.in_grad_hist_list.append(mx.nd.zeros_like(in_data[i]))
                autograd.mark_variables([in_data[i] for i in range(self.num_strides * 3)], self.in_grad_hist_list)

                with autograd.train_section():
                    for i in range(self.num_strides):
                        # Pass 1 pools features for the offset-predicting FC.
                        roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), group_size=7, pooled_size=7,
                                                                            sample_per_part=4, no_trans=True, part_size=7, output_dim=10, spatial_scale=1.0 / self.feat_strides[i])
                        roi_offset = mx.nd.FullyConnected(data=roi_offset_t, num_hidden=7 * 7 * 2, weight=in_data[i * 2 + self.num_strides], bias=in_data[i * 2 + 1 + self.num_strides])
                        roi_offset_reshape = mx.nd.reshape(data=roi_offset, shape=(-1, 2, 7, 7))
                        # Pass 2 applies the predicted offsets.
                        self.roi_pool[i] = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i], rois=mx.nd.array(rois_p[i], in_data[i].context), trans=roi_offset_reshape,
                                                                                group_size=7, pooled_size=7, sample_per_part=4, no_trans=False, part_size=7,
                                                                                output_dim=self.output_dim, spatial_scale=1.0 / self.feat_strides[i], trans_std=0.1)
            elif self.pooling_mode == 'alignave':
                autograd.mark_variables([in_data[i] for i in range(self.num_strides)], self.in_grad_hist_list)
                with autograd.train_section():
                    for i in range(self.num_strides):
                        self.roi_pool[i] = mx.contrib.nd.PSROIALIGNAVEPooling(data=in_data[i],
                                                                              rois=mx.nd.array(rois_p[i], in_data[i].context),
                                                                              group_size=7, pooled_size=7, sampling_ratio=4,
                                                                              output_dim=10,
                                                                              spatial_scale=1.0 / self.feat_strides[i])
            elif self.pooling_mode == 'pooling':
                autograd.mark_variables([in_data[i] for i in range(self.num_strides)], self.in_grad_hist_list)
                with autograd.train_section():
                    for i in range(self.num_strides):
                        # TODO: fix the output_dim hard code here
                        self.roi_pool[i] = mx.contrib.nd.PSROIPooling(data=in_data[i],
                                                                      rois=mx.nd.array(rois_p[i], in_data[i].context),
                                                                      group_size=7, pooled_size=7,
                                                                      output_dim=10,
                                                                      spatial_scale=1.0 / self.feat_strides[i])
            else:
                # BUG FIX: Python-2-only `print` statement replaced with a
                # call that is valid on both Python 2 and 3.
                print('no such pooling mode')
                pdb.set_trace()
            roi_pool = mx.nd.concatenate(self.roi_pool, axis=0)
        else:
            # During testing nothing is recorded, which saves memory.
            roi_pool = [None for _ in range(self.num_strides)]
            if self.pooling_mode == 'deform':
                for i in range(self.num_strides):
                    roi_offset_t = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i],
                                                                        rois=mx.nd.array(rois_p[i], in_data[i].context),
                                                                        group_size=7, pooled_size=7,
                                                                        sample_per_part=4, no_trans=True, part_size=7,
                                                                        output_dim=10,
                                                                        spatial_scale=1.0 / self.feat_strides[i])
                    roi_offset = mx.nd.FullyConnected(data=roi_offset_t, num_hidden=7 * 7 * 2,
                                                      weight=in_data[i * 2 + self.num_strides],
                                                      bias=in_data[i * 2 + 1 + self.num_strides])
                    roi_offset_reshape = mx.nd.reshape(data=roi_offset, shape=(-1, 2, 7, 7))
                    roi_pool[i] = mx.contrib.nd.DeformablePSROIPooling(data=in_data[i],
                                                                       rois=mx.nd.array(rois_p[i], in_data[i].context),
                                                                       trans=roi_offset_reshape,
                                                                       group_size=7, pooled_size=7, sample_per_part=4,
                                                                       no_trans=False, part_size=7,
                                                                       output_dim=self.output_dim,
                                                                       spatial_scale=1.0 / self.feat_strides[i],
                                                                       trans_std=0.1)
            elif self.pooling_mode == 'alignave':
                for i in range(self.num_strides):
                    roi_pool[i] = mx.contrib.nd.PSROIALIGNAVEPooling(data=in_data[i],
                                                                     rois=mx.nd.array(rois_p[i], in_data[i].context),
                                                                     group_size=7, pooled_size=7, sampling_ratio=4,
                                                                     output_dim=10,
                                                                     spatial_scale=1.0 / self.feat_strides[i])
            elif self.pooling_mode == 'pooling':
                for i in range(self.num_strides):
                    roi_pool[i] = mx.contrib.nd.PSROIPooling(data=in_data[i],
                                                             rois=mx.nd.array(rois_p[i], in_data[i].context),
                                                             group_size=7, pooled_size=7,
                                                             output_dim=10,
                                                             spatial_scale=1.0 / self.feat_strides[i])
            else:
                # BUG FIX: same Python-2 print statement converted to a call.
                print('no such pooling mode')
                pdb.set_trace()
            roi_pool = mx.nd.concatenate(roi_pool, axis=0)

        # Gather back into the original ROI order and write the output.
        roi_pool = mx.nd.take(roi_pool, mx.nd.array(rois_idx, roi_pool.context))
        self.assign(out_data[0], req[0], roi_pool)
# Example 8
# Critic (D) update loop.  NOTE(review): the difference loss errD_real -
# errD_fake together with the +/-0.01 weight clipping below looks like a
# WGAN-style critic update — confirm against the surrounding script.
# The fragment is truncated at the final print statement.
for epoch in range(opt.niter):
    train_iter.reset()
    for batch in train_iter:
        # Scheduler decides whether the generator trains this iteration.
        train_G = sched.next()
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        # train with real_t
        # Scale pixel values from [0, 255] into [-1, 1].
        data = batch.data[0].copyto(ctx) / 255. * 2. - 1.
        noise = mx.nd.random_normal(0,
                                    1,
                                    shape=(opt.batchSize, nz, 1, 1),
                                    ctx=ctx)

        with autograd.train_section():
            errD_real = netD(data)
            #output = output.reshape((opt.batchSize, 2))
            #errD_real = nn.loss.softmax_cross_entropy_loss(output, real_label)

            # detach() keeps gradients from flowing into the generator here.
            fake = netG(noise)
            errD_fake = netD(fake.detach())
            #output = output.reshape((opt.batchSize, 2))
            #errD_fake = nn.loss.softmax_cross_entropy_loss(output, fake_label)
            errD = errD_real - errD_fake
            errD.backward()

        optimizerD.step(opt.batchSize)
        # Clip every critic parameter into [-0.01, 0.01].
        for p in netD.params.values():
            p.set_data(mx.nd.clip(p.data(ctx=ctx), -0.01, 0.01))
        print('D: %.6f (%.6f, %.6f)' %
# Example 9
    # Tail of an iterator-factory function whose definition lies outside
    # this fragment.
    return train, val

# Presumably CIFAR-10 images resized to 64x64 in (3, 64, 64) batches —
# TODO confirm against cifar10_iterator's definition.
train_iter, val_iter = cifar10_iterator(opt.batchSize, (3, 64, 64), 64)

# Critic (D) update loop (compact duplicate of the fragment above);
# truncated at the final print statement.
for epoch in range(opt.niter):
    train_iter.reset()
    for batch in train_iter:
        # Scheduler decides whether the generator trains this iteration.
        train_G = sched.next()
        ############################
        # (1) Update D network: maximize log(D(x)) + log(1 - D(G(z)))
        ###########################
        # train with real_t
        # Scale pixel values from [0, 255] into [-1, 1].
        data = batch.data[0].copyto(ctx) / 255. * 2. - 1.
        noise = mx.nd.random_normal(0, 1, shape=(opt.batchSize, nz, 1, 1), ctx=ctx)

        with autograd.train_section():
            errD_real = netD(data)
            #output = output.reshape((opt.batchSize, 2))
            #errD_real = nn.loss.softmax_cross_entropy_loss(output, real_label)

            # detach() keeps gradients from flowing into the generator here.
            fake = netG(noise)
            errD_fake = netD(fake.detach())
            #output = output.reshape((opt.batchSize, 2))
            #errD_fake = nn.loss.softmax_cross_entropy_loss(output, fake_label)
            errD = errD_real - errD_fake
            errD.backward()

        optimizerD.step(opt.batchSize)
        # Clip every critic parameter into [-0.01, 0.01].
        for p in netD.params.values():
            p.set_data(mx.nd.clip(p.data(ctx=ctx), -0.01, 0.01))
        print('D: %.6f (%.6f, %.6f)' % (