Example #1
    def plot_on_graph (self):

        lineList = self.ax.get_lines()

        for i, dataSet in enumerate(self.data):
            plt.pause(self.pause)
            lineList[i].set_xdata(self.axis)
            lineList[i].set_ydata(dataSet)
            plt.xticks(self.axis)

        newscale = self.ax.get_lines()
        self.scaleList = []

        for line in newscale:
            if line.get_visible():
                self.scaleList.extend(line.get_data()[1])
            
        low = min(self.scaleList)
        high = max(self.scaleList)
        self.ax.set_ylim(low - abs(low) * 0.15, high + abs(high) * 0.15)
        plt.draw()
        
        self.lines = lineList
        self.lined = dict()
        for legline, origline in zip(self.leg.get_lines(), self.lines):
            legline.set_picker(5)  # 5 pts tolerance
            self.lined[legline] = origline          
    def __init__(self,
                 ref_params,
                 save_state_freq=500,
                 overwrite_state=True,
                 plot=False):
        """

        :param ref_params: instance of refinement Parameters (LMP in code below)
        :param save_state_freq: how often to save all models (will be overwritten each time)
        """
        num_params = len(ref_params)
        self.vary = np.zeros(num_params).astype(bool)
        for p in ref_params.values():
            self.vary[p.xpos] = not p.fix
        self.x0 = np.ones(num_params)
        self.g = None
        self.ref_params = ref_params
        self.iternum = 0
        self.all_times = []
        self.save_state_freq = save_state_freq
        self.overwrite_state = overwrite_state
        self.med_offsets = []  # median prediction offsets (a new value is appended every time write_output_files is called)
        self.med_iternums = []
        self.plot = plot and COMM.rank == 0
        if self.plot:
            self.fig = plt.figure()
            self.ax = plt.gca()
            plt.draw()
            plt.pause(0.1)
    def do_plot():
        if solver.iter % display == 0:

            loss[solver.iter] = solver.net.blobs['loss3/loss3'].data.copy()
            loss_disp = 'loss=' + str(loss[solver.iter])

            print '%3d) %s' % (solver.iter, loss_disp)

            train_loss[solver.iter / display] = loss[solver.iter]
            ax1.plot(it_axes[0:solver.iter / display],
                     train_loss[0:solver.iter / display], 'r')
            # if it > test_interval:
            #     ax1.plot(it_val_axes[0:it/test_interval], val_loss[0:it/test_interval], 'g') #Val always on top
            ax1.set_ylim([5, 7])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)

            # VALIDATE: validation done this way only uses 1 GPU
        if solver.iter % test_interval == 0 and solver.iter > 0:
            loss_val = 0
            for i in range(test_iters):
                solver.test_nets[0].forward()
                loss_val += solver.test_nets[0].blobs['loss3/loss3'].data
            loss_val /= test_iters
            print("Val loss: {:.3f}".format(loss_val))

            val_loss[solver.iter / test_interval - 1] = loss_val
            ax1.plot(it_val_axes[0:solver.iter / test_interval],
                     val_loss[0:solver.iter / test_interval], 'g')
            ax1.set_ylim([5, 7])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)
            title = '../../../datasets/SocialMedia/models/training/' + training_id + str(
                solver.iter) + '.png'  # Save graph to disk
            savefig(title, bbox_inches='tight')
Example #4
def draw(weightvect):
    """ function that draw the graph so the line that separate the points from the special point
    """
    axeofx = [-1, 2]
    axeofy = calculate(axeofx, weightvect)

    plt.xlim(-1, 2)
    plt.ylim(-1, 2)
    plt.plot(axeofx, axeofy, color="black")
    pointx = [0, 0, 1]
    pointy = [0, 1, 0]
    plt.scatter(pointx, pointy, color="blue")
    plt.scatter(1, 1, color="red")
    plt.ylabel("x2")
    plt.xlabel("x1")
    title1 = "Weight Vector : " + str(weightvect)
    plt.title(title1, backgroundcolor ="green",color="black")
    plt.pause(0.2)
    plt.cla()

    return 0
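A minimal usage sketch for the function above: `calculate` is not shown in the snippet, so a hypothetical implementation is assumed here that returns the x2 values of the separating line w0 + w1*x1 + w2*x2 = 0; calling `draw` once per step turns the `plt.pause(0.2)` / `plt.cla()` pair into a simple animation of a moving decision boundary.

import numpy as np
import matplotlib.pyplot as plt


def calculate(xs, w):
    # Hypothetical helper: solve w[0] + w[1]*x1 + w[2]*x2 = 0 for x2 at each x1 in xs
    return [-(w[0] + w[1] * x) / w[2] for x in xs]


# Animate an illustrative sequence of weight vectors
for w in ([1.0, -0.5, 1.0], [1.2, -0.8, 1.0], [1.5, -1.0, 1.0]):
    draw(np.round(w, 2))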
    def __call__(self, x, *args, **kwargs):
        self.iternum += 1
        t = time.time()
        self.x0[self.vary] = x
        #time_per_iter = (time.time()-self.tstart) / self.iternum

        f, self.g, self.sigmaZ = target_and_grad(self.x0, self.ref_params,
                                                 *args, **kwargs)
        t = time.time() - t
        if COMM.rank == 0:
            self.all_times.append(t)
            time_per_iter = np.mean(self.all_times)
            pred_offset_str = ", ".join(
                map(lambda x: "%.4f" % x, self.med_offsets))
            print(
                "Iteration %d:\n\tResid=%f, sigmaZ %f, t-per-iter=%.4f sec, pred_offsets=%s"
                %
                (self.iternum, f, self.sigmaZ, time_per_iter, pred_offset_str),
                flush=True)
        if self.iternum % self.save_state_freq == 0 and self.iternum > 0:
            if not self.overwrite_state:
                params = args[-1]  # phil params
                temp_pandas_dir = params.geometry.pandas_dir
                params.geometry.pandas_dir = params.geometry.pandas_dir + "-iter%d" % self.iternum
            med_offset = write_output_files(self.x0, self.ref_params, *args,
                                            **kwargs)
            self.med_offsets.append(med_offset)
            self.med_iternums.append(self.iternum)
            if self.plot:
                self.ax.clear()
                self.ax.plot(self.med_iternums, self.med_offsets)
                self.ax.set_ylabel("median |xobs-xcal| (pixels)")
                self.ax.set_xlabel("iteration #")
                plt.draw()
                plt.pause(0.01)
            if not self.overwrite_state:
                params.geometry.pandas_dir = temp_pandas_dir
        return f
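Stripped of the refinement machinery (target_and_grad, MPI, the phil params), the pattern in __call__ above is simply a callable object that an optimizer minimizes while a matplotlib axis is periodically refreshed with plt.draw() / plt.pause(). A hedged, self-contained sketch of that pattern, with an illustrative quadratic objective standing in for the real target:

import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import minimize


class LiveTarget:
    """Toy target function that plots its own convergence while being minimized."""

    def __init__(self, plot_every=10):
        self.iternum = 0
        self.history = []
        self.plot_every = plot_every
        self.fig, self.ax = plt.subplots()

    def __call__(self, x):
        self.iternum += 1
        f = float(np.sum((x - 3.0) ** 2))  # toy quadratic objective
        self.history.append(f)
        if self.iternum % self.plot_every == 0:
            self.ax.clear()
            self.ax.plot(self.history)
            self.ax.set_xlabel("iteration #")
            self.ax.set_ylabel("objective")
            plt.draw()
            plt.pause(0.01)
        return f


result = minimize(LiveTarget(), x0=np.zeros(4), method="Nelder-Mead")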
Example #6
def do_solve(niter, solvers, disp_interval, test_interval, test_iters):
    """Run solvers for niter iterations,
       returning the loss and recorded each iteration.
       `solvers` is a list of (name, solver) tuples."""

    import tempfile
    import numpy as np
    import os
    from pylab import zeros, arange, subplots, plt, savefig
    import time

    # SET PLOTS DATA
    train_loss = zeros(niter / disp_interval)
    val_loss = zeros(niter / test_interval)

    it_axes = (arange(niter) * disp_interval) + disp_interval
    it_val_axes = (arange(niter) * test_interval) + test_interval

    _, ax1 = subplots()
    # ax2 = ax1.twinx()
    ax1.set_xlabel('iteration')
    ax1.set_ylabel('train loss (r), val loss (g)')
    # ax2.set_ylabel('val loss (g)')
    # ax2.set_autoscaley_on(False)
    # ax2.set_ylim([0, 1])

    loss = {name: np.zeros(niter) for name, _ in solvers}

    #RUN TRAINING
    for it in range(niter):
        for name, s in solvers:
            # start = time.time()
            s.step(1)  # run a single SGD step in Caffe
            # end = time.time()
            # print "Time step: " + str((end - start))
            loss[name][it] = s.net.blobs['loss3/loss3'].data.copy()

        #PLOT
        if it % disp_interval == 0 or it + 1 == niter:
            loss_disp = 'loss=' + str(loss['my_solver'][it])

            print '%3d) %s' % (it, loss_disp)

            train_loss[it / disp_interval] = loss['my_solver'][it]

            ax1.plot(it_axes[0:it / disp_interval],
                     train_loss[0:it / disp_interval], 'r')
            ax1.set_ylim([4, 7])
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)
            # title = '../training/numbers/training-' + str(it) + '.png'  # Save graph to disk
            # savefig(title, bbox_inches='tight')

        #VALIDATE
        if it % test_interval == 0 and it > 0:
            loss_val = 0
            for i in range(test_iters):
                solvers[0][1].test_nets[0].forward()
                loss_val += solvers[0][1].test_nets[0].blobs[
                    'loss3/loss3'].data
            loss_val /= test_iters
            print("Val loss: {:.3f}".format(loss_val))

            val_loss[it / test_interval - 1] = loss_val
            ax1.plot(it_val_axes[0:it / test_interval],
                     val_loss[0:it / test_interval], 'g')
            ax1.set_ylim([4, 7])
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)
            title = '../../../datasets/recipes5k/models/training/training-ingredients_Inception_frozen_500_raw' + str(
                it) + '.png'  # Save graph to disk
            savefig(title, bbox_inches='tight')

    #Save the learned weights from both nets at the end of the training
    weight_dir = tempfile.mkdtemp()
    weights = {}
    for name, s in solvers:
        filename = 'weights.%s.caffemodel' % name
        weights[name] = os.path.join(weight_dir, filename)
        s.net.save(weights[name])

    return loss, weights
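A hedged usage sketch for the function above, assuming pycaffe is available and that `solver.prototxt` (a hypothetical path) defines both a train net with a `loss3/loss3` blob and a test net; the solver must be registered under the name 'my_solver', because the plotting code indexes the loss dict with that key.

import caffe

caffe.set_mode_gpu()
my_solver = caffe.SGDSolver('solver.prototxt')  # hypothetical solver definition file

loss, weights = do_solve(niter=10000,
                         solvers=[('my_solver', my_solver)],
                         disp_interval=50,
                         test_interval=500,
                         test_iters=100)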
Example #7
def do_solve(maxIter, solver, display, test_interval, test_iters):

    import numpy as np
    from pylab import zeros, arange, subplots, plt, savefig

    # SET PLOTS DATA
    train_loss_C = zeros(maxIter/display)
    train_top1 = zeros(maxIter/display)
    train_top5 = zeros(maxIter/display)

    val_loss_C = zeros(maxIter/test_interval)
    val_top1 = zeros(maxIter/test_interval)
    val_top5 = zeros(maxIter/test_interval)


    it_axes = (arange(maxIter) * display) + display
    it_val_axes = (arange(maxIter) * test_interval) + test_interval

    _, ax1 = subplots()
    ax2 = ax1.twinx()
    ax1.set_xlabel('iteration')
    ax1.set_ylabel('train loss C (r), val loss C (y)')
    ax2.set_ylabel('train TOP1 (b), val TOP1 (g), train TOP-5 (c), val TOP-5 (k)')
    ax2.set_autoscaley_on(False)
    ax2.set_ylim([0, 1])

    lossC = np.zeros(maxIter)
    acc1 = np.zeros(maxIter)
    acc5 = np.zeros(maxIter)


    #RUN TRAINING
    for it in range(maxIter):
        #st = time.time()
        solver.step(1)  # run a single SGD step in Caffepy()
        #en = time.time()
        #print "Time step: " + str((en-st))

        #PLOT
        if it % display == 0 or it + 1 == maxIter:
            lossC[it] = solver.net.blobs['loss3/loss3'].data.copy()
            acc1[it] = solver.net.blobs['loss3/top-1'].data.copy()
            acc5[it] = solver.net.blobs['loss3/top-5'].data.copy()

            loss_disp = 'loss3C= ' + str(lossC[it]) +  '  top-1= ' + str(acc1[it])

            print '%3d) %s' % (it, loss_disp)

            train_loss_C[it / display] = lossC[it]
            train_top1[it / display] = acc1[it]
            train_top5[it / display] = acc5[it]

            ax1.plot(it_axes[0:it / display], train_loss_C[0:it / display], 'r')
            ax2.plot(it_axes[0:it / display], train_top1[0:it / display], 'b')
            ax2.plot(it_axes[0:it / display], train_top5[0:it / display], 'c')

            #ax1.set_ylim([0, 10])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)

        #VALIDATE
        if it % test_interval == 0 and it > 0:
            loss_val_C = 0
            top1_val = 0
            top5_val = 0
            for i in range(test_iters):
                solver.test_nets[0].forward()
                loss_val_C += solver.test_nets[0].blobs['loss3/loss3'].data
                top1_val += solver.test_nets[0].blobs['loss3/top-1'].data
                top5_val += solver.test_nets[0].blobs['loss3/top-5'].data


            loss_val_C /= test_iters
            top1_val /= test_iters
            top5_val /= test_iters


            print("Val loss C: {:.3f}".format(loss_val_C))

            val_loss_C[it / test_interval - 1] = loss_val_C
            val_top1[it / test_interval - 1] = top1_val
            val_top5[it / test_interval - 1] = top5_val


            ax1.plot(it_val_axes[0:it / test_interval], val_loss_C[0:it/ test_interval], 'y')
            ax2.plot(it_val_axes[0:it / test_interval], val_top1[0:it / test_interval], 'g')
            ax2.plot(it_val_axes[0:it / test_interval], val_top5[0:it / test_interval], 'k')


            #ax1.set_ylim([0, 10])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)
            title = '../../../datasets/WebVision/models/training/' + training_id + str(it) + '.png'  # Save graph to disk
            savefig(title, bbox_inches='tight')

    return
Example #8
    def do_plot():
        if solver.iter % display == 0:

            lossC[solver.iter] = solver.net.blobs['loss3/loss3'].data.copy()
            lossR[solver.iter] = solver.net.blobs['loss3/loss3/R'].data.copy()
            acc1[solver.iter] = solver.net.blobs['loss3/top-1'].data.copy()
            acc5[solver.iter] = solver.net.blobs['loss2/top-5'].data.copy()

            loss_disp = 'loss3C= ' + str(lossC[solver.iter]) +  '  loss3R= ' + str(lossR[solver.iter]) + '  top-1= ' + str(acc1[solver.iter]) 

            print '%3d) %s' % (solver.iter, loss_disp)

            train_loss_C[solver.iter / display] = lossC[solver.iter]
            train_loss_R[solver.iter / display] = lossR[solver.iter]
            train_top1[solver.iter / display] = acc1[solver.iter]
            train_top5[solver.iter / display] = acc5[solver.iter]

            ax1.plot(it_axes[0:solver.iter / display], train_loss_C[0:solver.iter / display], 'r')
            ax1.plot(it_axes[0:solver.iter / display], train_loss_R[0:solver.iter / display], 'm')
            ax2.plot(it_axes[0:solver.iter / display], train_top1[0:solver.iter / display], 'b')
            ax2.plot(it_axes[0:solver.iter / display], train_top5[0:solver.iter / display], 'c')

            ax1.set_ylim([0, 10])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)

            # VALIDATE: validation done this way only uses 1 GPU
        if solver.iter % test_interval == 0 and solver.iter > 0:
            loss_val_R = 0
            loss_val_C = 0
            top1_val = 0
            for i in range(test_iters):
                solver.test_nets[0].forward()
                loss_val_C += solver.test_nets[0].blobs['loss3/loss3'].data
                loss_val_R += solver.test_nets[0].blobs['loss3/loss3/R'].data
                top1_val += solver.test_nets[0].blobs['loss3/top-1'].data

            loss_val_C /= test_iters
            loss_val_R /= test_iters
            top1_val /= test_iters

            print("Val loss C: {:.3f}".format(loss_val_C))

            val_loss_C[solver.iter / test_interval - 1] = loss_val_C
            val_loss_R[solver.iter / test_interval - 1] = loss_val_R
            val_top1[solver.iter / test_interval - 1] = top1_val

            ax1.plot(it_val_axes[0:solver.iter / test_interval], val_loss_C[0:solver.iter / test_interval], 'y')
            ax1.plot(it_val_axes[0:solver.iter / test_interval], val_loss_R[0:solver.iter / test_interval], 'k')
            ax2.plot(it_val_axes[0:solver.iter / test_interval], val_top1[0:solver.iter / test_interval], 'g')

            ax1.set_ylim([0, 10])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)
            title = '../../../datasets/WebVision/models/training/' + training_id + str(
                solver.iter) + '.png'  # Save graph to disk
            savefig(title, bbox_inches='tight')
Example #9
def do_solve(niter, solvers, disp_interval, test_interval, test_iters):
    """Run solvers for niter iterations,
       returning the loss and accuracy recorded each iteration.
       `solvers` is a list of (name, solver) tuples."""

    import tempfile
    import numpy as np
    import os
    from pylab import zeros, arange, subplots, plt, savefig
    import time

    # SET PLOTS DATA
    train_loss = zeros(niter/disp_interval)
    train_acc = zeros(niter/disp_interval)
    val_acc = zeros(niter/test_interval)

    it_axes = (arange(niter) * disp_interval) + disp_interval
    it_val_axes = (arange(niter) * test_interval) + test_interval

    _, ax1 = subplots()
    ax2 = ax1.twinx()
    ax1.set_xlabel('iteration')
    ax1.set_ylabel('train loss (r)')
    ax2.set_ylabel('train accuracy (b), val accuracy (g)')
    ax2.set_autoscaley_on(False)
    ax2.set_ylim([0, 1])

    blobs = ('loss','acc')
    loss, acc = ({name: np.zeros(niter) for name, _ in solvers}
                 for _ in blobs)


    #RUN TRAINING
    for it in range(niter):
        for name, s in solvers:


            # print "FC7 data"
            # print s.net.blobs['fc7'].data
            # print "Classifier weights"
            # print s.net.params['classifier'][0].data
            # print "Classifier data"
            # print s.net.blobs['classifier'].data

            # start = time.time()
            s.step(1)  # run a single SGD step in Caffe
            # end = time.time()
            # print "Time step: " + str((end - start))
            loss[name][it], acc[name][it] = (s.net.blobs[b].data.copy()
                                             for b in blobs)




        #PLOT
        if it % disp_interval == 0 or it + 1 == niter:
            loss_disp = '; '.join('%s: loss=%.3f, acc=%2d%%' %
                                  (n, loss[n][it], np.round(100*acc[n][it]))
                                  for n, _ in solvers)
            print '%3d) %s' % (it, loss_disp)

            train_loss[it/disp_interval] = loss['my_solver'][it]
            train_acc[it/disp_interval] = acc['my_solver'][it]

            ax1.plot(it_axes[0:it/disp_interval], train_loss[0:it/disp_interval], 'r')
            ax2.plot(it_axes[0:it/disp_interval], train_acc[0:it/disp_interval], 'b')
            plt.ion()
            plt.show()
            plt.pause(0.001)
            # title = '../training/numbers/training-' + str(it) + '.png'  # Save graph to disk
            # savefig(title, bbox_inches='tight')

        #VALIDATE
        if it % test_interval == 0 and it > 0:
            accuracy = 0
            for i in range(test_iters):
                solvers[0][1].test_nets[0].forward()
                accuracy += solvers[0][1].test_nets[0].blobs['acc'].data
            accuracy /= test_iters
            print("Test Accuracy: {:.3f}".format(accuracy))

            val_acc[it/test_interval - 1] = accuracy
            ax2.plot(it_val_axes[0:it/test_interval], val_acc[0:it/test_interval], 'g')
            plt.ion()
            plt.show()
            plt.pause(0.001)
            title = '../../../datasets/SocialMedia/models/training/training-' + str(it) + '.png'  # Save graph to disk
            savefig(title, bbox_inches='tight')


    #Save the learned weights from both nets at the end of the training
    weight_dir = tempfile.mkdtemp()
    weights = {}
    for name, s in solvers:
        filename = 'weights.%s.caffemodel' % name
        weights[name] = os.path.join(weight_dir, filename)
        s.net.save(weights[name])

    return loss, acc, weights
Example #10
def get_linear_model_histogramDouble(code,
                                     ptype='low',
                                     dtype='d',
                                     start=None,
                                     end=None,
                                     vtype='f',
                                     filter='n',
                                     df=None,
                                     dl=None):
    # 399001','cyb':'zs399006','zxb':'zs399005
    # code = '999999'
    # code = '601608'
    # code = '000002'
    # asset = get_kdate_data(code)['close'].sort_index(ascending=True)
    # df = tdd.get_tdx_Exp_day_to_df(code, 'f').sort_index(ascending=True)
    # ptype='close'
    # if ptype == 'close' or ptype==''
    # ptype=
    if start is not None and filter == 'y':
        if code not in ['999999', '399006', '399001']:
            index_d, dl = tdd.get_duration_Index_date(dt=start)
            log.debug("index_d:%s dl:%s" % (str(index_d), dl))
        else:
            index_d = cct.day8_to_day10(start)
            log.debug("index_d:%s" % (index_d))
        start = tdd.get_duration_price_date(code, ptype='low', dt=index_d)
        log.debug("start:%s" % (start))

    if start is None and df is None and dl is not None:
        start = cct.last_tddate(dl)
        # print start
        df = tdd.get_tdx_append_now_df_api(code, start=start,
                                           end=end).sort_index(ascending=True)

    if df is None:
        # df = tdd.get_tdx_append_now_df(code, ptype, start, end).sort_index(ascending=True)
        df = tdd.get_tdx_append_now_df_api(code, start,
                                           end).sort_index(ascending=True)
    if not dtype == 'd':
        df = tdd.get_tdx_stock_period_to_type(df,
                                              dtype).sort_index(ascending=True)

    if len(df) == 0:
        raise Exception("Code:%s error, df is None" % (code))
    asset = df[ptype].round(2)
    log.info("df:%s" % asset[:1])
    asset = asset.dropna()
    dates = asset.index

    if not code.startswith('999') and not code.startswith('399'):
        # print "code:",code
        if code[:1] in ['5', '6', '9']:
            code2 = '999999'
        elif code[:2] in ['30']:
            # print "cyb"
            code2 = '399006'
        else:
            code2 = '399001'
        df1 = tdd.get_tdx_append_now_df_api(code2, start,
                                            end).sort_index(ascending=True)
        # df1 = tdd.get_tdx_append_now_df(code2, ptype, start, end).sort_index(ascending=True)
        if not dtype == 'd':
            df1 = tdd.get_tdx_stock_period_to_type(
                df1, dtype).sort_index(ascending=True)
            # if len(asset) < len(df1):
            # asset1 = df1.loc[asset.index, ptype]
            # else:
            # asset1 = df1.loc[asset.index, ptype]
        # startv = asset1[:1]
        # asset1 = asset1.apply(lambda x: round(x / asset1[:1], 2))
        # print asset[:1].index[0] , df1[:1].index[0]
        if asset[:1].index[0] > df1[:1].index[0]:
            asset1 = df1.loc[asset.index, ptype]
            startv = asset1[:1]
            asset1 = asset1.apply(lambda x: round(x / asset1[:1], 2))
        else:
            df = df[df.index >= df1.index[0]]
            asset = df[ptype]
            asset = asset.dropna()
            dates = asset.index
            asset1 = df1.loc[df.index, ptype]
            asset1 = asset1.apply(lambda x: round(x / asset1[:1], 2))

    else:
        if code.startswith('399001'):
            code2 = '399006'
        elif code.startswith('399006'):
            code2 = '399005'
        else:
            code2 = '399006'
        if code2.startswith('3990'):
            df1 = tdd.get_tdx_append_now_df_api(code2, start,
                                                end).sort_index(ascending=True)
            if len(df1) < int(len(df) / 4):
                code2 = '399001'
                df1 = tdd.get_tdx_append_now_df_api(
                    code2, start, end).sort_index(ascending=True)

        # df1 = tdd.get_tdx_append_now_df(code2, ptype, start, end).sort_index(ascending=True)
        if not dtype == 'd':
            df1 = tdd.get_tdx_stock_period_to_type(
                df1, dtype).sort_index(ascending=True)
        if len(asset) < len(df1):
            asset1 = df1.loc[asset.index, ptype]
            asset1 = asset1.apply(lambda x: round(x / asset1[:1], 2))
        else:

            df = df[df.index >= df1.index[0]]
            asset = df[ptype]
            asset = asset.dropna()
            dates = asset.index
            asset1 = df1.loc[df.index, ptype]
            asset1 = asset1.apply(lambda x: round(x / asset1[:1], 2))
    # print len(df),len(asset),len(df1),len(asset1)

    if end is not None:
        # print asset[-1:]
        asset = asset[:-1]
        dates = asset.index
        asset1 = asset1[:-1]
        asset1 = asset1.apply(lambda x: round(x / asset1[:1], 2))

    # Plot the price as a function of time
    # _, ax = plt.subplots()
    # fig = plt.figure()
    # plt.ion()
    fig = plt.figure(figsize=(16, 10))

    # fig = plt.figure(figsize=(16, 10), dpi=72)
    # fig.autofmt_xdate() #(no fact)

    # plt.subplots_adjust(bottom=0.1, right=0.8, top=0.9)
    plt.subplots_adjust(left=0.05,
                        bottom=0.08,
                        right=0.95,
                        top=0.95,
                        wspace=0.15,
                        hspace=0.25)
    # set (gca,'Position',[0,0,512,512])
    # fig.set_size_inches(18.5, 10.5)
    # fig=plt.fig(figsize=(14,8))
    ax1 = fig.add_subplot(321)
    # asset=asset.apply(lambda x:round( x/asset[:1],2))
    ax1.plot(asset)
    # ax1.plot(asset1,'-r', linewidth=2)
    ticks = ax1.get_xticks()
    # start, end = ax1.get_xlim()
    # print start, end, len(asset)
    # print ticks, ticks[:-1]
    # (ticks[:-1] if len(asset) > end else np.append(ticks[:-1], len(asset) - 1))

    ax1.set_xticklabels(
        [dates[int(i)] for i in (np.append(ticks[:-1],
                                           len(asset) - 1))],
        rotation=15)  # Label x-axis with dates
    # Fit a linear trend with OLS
    X = np.arange(len(asset))
    x = sm.add_constant(X)
    model = regression.linear_model.OLS(asset, x).fit()
    a = model.params[0]
    b = model.params[1]
    # log.info("a:%s b:%s" % (a, b))
    log.info("X:%s a:%s b:%s" % (len(asset), a, b))
    Y_hat = X * b + a

    # Actual minus fitted: the largest and smallest residuals define the value fluctuation band
    # Shift the trendline down
    i = (asset.values.T - Y_hat).argmin()
    c_low = X[i] * b + a - asset.values[i]
    Y_hatlow = X * b + a - c_low

    # Shift the trendline up
    i = (asset.values.T - Y_hat).argmax()
    c_high = X[i] * b + a - asset.values[i]
    Y_hathigh = X * b + a - c_high
    plt.plot(X, Y_hat, 'k', alpha=0.9)
    plt.plot(X, Y_hatlow, 'r', alpha=0.9)
    plt.plot(X, Y_hathigh, 'r', alpha=0.9)
    # plt.xlabel('Date', fontsize=12)
    plt.ylabel('Price', fontsize=12)
    plt.title(code + " | " + str(dates[-1])[:11], fontsize=14)
    plt.legend([asset.iat[-1]], fontsize=12, loc=4)
    plt.grid(True)

    # #plot volume
    # pad = 0.25
    # yl = ax1.get_ylim()
    # ax1.set_ylim(yl[0]-(yl[1]-yl[0])*pad,yl[1])
    # axx = ax1.twinx()
    # axx.set_position(transforms.Bbox([[0.125,0.1],[0.9,0.32]]))
    # volume = np.asarray(df.vol)
    # pos = df['open']-df['close']<0
    # neg = df['open']-df['close']>=0
    # idx = np.asarray([x for x in range(len(df))])
    # axx.bar(idx[pos],volume[pos],color='red',width=1,align='center')
    # axx.bar(idx[neg],volume[neg],color='green',width=1,align='center')

    # plt.legend([code]);
    # plt.legend([code, 'Value center line', 'Value interval line']);
    # fig=plt.fig()
    # fig.figsize = [14,8]
    scale = 1.1
    zp = zoompan.ZoomPan()
    figZoom = zp.zoom_factory(ax1, base_scale=scale)
    figPan = zp.pan_factory(ax1)

    # Plot the deviation of the price from the value-center line (Y - Y_hat) in its own subplot and divide the
    # region between the boundary lines evenly: above 0 is overvalued, below 0 is undervalued, 0 is the center line.
    ax3 = fig.add_subplot(322)
    # distance = (asset.values.T - Y_hat)
    distance = (asset.values.T - Y_hat)[0]
    # if code.startswith('999') or code.startswith('399'):
    if len(asset) > len(df1):
        ax3.plot(asset)
        plt.plot(distance)
        ticks = ax3.get_xticks()
        ax3.set_xticklabels(
            [dates[int(i)] for i in (np.append(ticks[:-1],
                                               len(asset) - 1))],
            rotation=15)
        n = 5
        d = (-c_high + c_low) / n
        c = c_high
        while c <= c_low:
            Y = X * b + a - c
            plt.plot(X, Y - Y_hat, 'r', alpha=0.9)
            c = c + d
        ax3.plot(asset)
        ## plt.xlabel('Date', fontsize=12)
        plt.ylabel('Price-center price', fontsize=14)
        plt.grid(True)
    else:
        as3 = asset.apply(lambda x: round(x / asset[:1], 2))
        ax3.plot(as3)
        ticks = ax3.get_xticks()
        ax3.plot(asset1, '-r', linewidth=2)

        # show volume bar !!!
        # assvol = df.loc[asset.index]['vol']
        # assvol = assvol.apply(lambda x: round(x / assvol[:1], 2))
        # ax3.plot(assvol, '-g', linewidth=0.5)

        ax3.set_xticklabels(
            [dates[int(i)] for i in (np.append(ticks[:-1],
                                               len(asset) - 1))],
            rotation=15)
        plt.grid(True)
        zp3 = zoompan.ZoomPan()
        figZoom = zp3.zoom_factory(ax3, base_scale=scale)
        figPan = zp3.pan_factory(ax3)
    # plt.title(code, fontsize=14)

    if 'name' in df.columns:
        plt.legend([df.name.values[-1:][0], df1.name.values[-1:][0]], loc=0)
    else:
        if code not in ['999999', '399006', '399001']:
            indexIdx = False
        else:
            indexIdx = True
        dm = tdd.get_sina_data_df(code, index=indexIdx)
        if 'name' in dm.columns:
            cname = dm.name[0]
        else:
            cname = '-'
        # plt.legend([code, code2], loc=0)
        plt.legend([cname, code2], loc=0)

    ax2 = fig.add_subplot(323)
    # ax2.plot(asset)
    # ticks = ax2.get_xticks()
    ax2.set_xticklabels(
        [dates[int(i)] for i in (np.append(ticks[:-1],
                                           len(asset) - 1))],
        rotation=15)
    # plt.plot(X, Y_hat, 'k', alpha=0.9)
    n = 5
    d = (-c_high + c_low) / n
    c = c_high
    while c <= c_low:
        Y = X * b + a - c
        plt.plot(X, Y, 'r', alpha=0.9)
        c = c + d
    # asset=asset.apply(lambda x:round(x/asset[:1],2))
    ax2.plot(asset)
    # ax2.plot(asset1,'-r', linewidth=2)
    # plt.xlabel('Date', fontsize=12)
    plt.ylabel('Price', fontsize=12)
    plt.grid(True)

    # plt.title(code, fontsize=14)
    # plt.legend([code])

    if len(df) > 10:
        ax6 = fig.add_subplot(324)
        h = df.loc[:, ['open', 'close', 'high', 'low']]
        highp = h['high'].values
        lowp = h['low'].values
        openp = h['open'].values
        closep = h['close'].values
        # print len(closep)
        lr = LinearRegression()
        x = np.atleast_2d(np.linspace(0, len(closep), len(closep))).T
        lr.fit(x, closep)
        LinearRegression(copy_X=True,
                         fit_intercept=True,
                         n_jobs=1,
                         normalize=False)
        xt = np.atleast_2d(np.linspace(0,
                                       len(closep) + 200,
                                       len(closep) + 200)).T
        yt = lr.predict(xt)
        bV = []
        bP = []
        for i in range(1, len(highp) - 1):
            if highp[i] <= highp[i - 1] and highp[i] < highp[
                    i + 1] and lowp[i] <= lowp[i - 1] and lowp[i] < lowp[i +
                                                                         1]:
                bV.append(lowp[i])
                bP.append(i)
            else:
                bV.append(lowp[i - 1])
                bP.append(i - 1)
        if len(bV) > 0:

            d, p = LIS(bV)

            idx = []
            for i in range(len(p)):
                idx.append(bP[p[i]])
            lr = LinearRegression()
            X = np.atleast_2d(np.array(idx)).T
            Y = np.array(d)
            lr.fit(X, Y)
            estV = lr.predict(xt)
            ax6.plot(closep, linewidth=2)
            ax6.plot(idx, d, 'ko')
            ax6.plot(xt, estV, '-r', linewidth=3)
            ax6.plot(xt, yt, '-g', linewidth=3)
            plt.grid(True)

            # plt.tight_layout()
            zp2 = zoompan.ZoomPan()
            figZoom = zp2.zoom_factory(ax6, base_scale=scale)
            figPan = zp2.pan_factory(ax6)

    # Count how often prices fall in each band and plot a histogram; to show the frequencies more finely,
    # the whole boundary interval is split into 100 bins.

    ax4 = fig.add_subplot(325)
    log.info("assert:len:%s %s" % (len(asset.values.T - Y_hat),
                                   (asset.values.T - Y_hat)[0]))
    # distance = map(lambda x:int(x),(asset.values.T - Y_hat)/Y_hat*100)
    # now_distanse=int((asset.iat[-1]-Y_hat[-1])/Y_hat[-1]*100)
    # log.debug("dis:%s now:%s"%(distance[:2],now_distanse))
    # log.debug("now_distanse:%s"%now_distanse)
    distance = (asset.values.T - Y_hat)
    now_distanse = asset.iat[-1] - Y_hat[-1]
    # distance = (asset.values.T-Y_hat)[0]
    pd.Series(distance).plot(kind='hist', stacked=True, bins=100)
    # plt.plot((asset.iat[-1].T-Y_hat),'b',alpha=0.9)
    plt.axvline(now_distanse, label="1", color='red')
    # plt.axhline(now_distanse,hold=None,label="1",color='red')
    # plt.axvline(asset.iat[0],hold=None,label="1",color='red',linestyle="--")
    plt.xlabel(
        'Undervalue ------------------------------------------> Overvalue',
        fontsize=12)
    plt.ylabel('Frequency', fontsize=14)
    # plt.title('Undervalue & Overvalue Statistical Chart', fontsize=14)
    plt.legend([code, asset.iat[-1], str(dates[-1])[5:11]], fontsize=12)
    plt.grid(True)

    # plt.show()
    # import os
    # print(os.path.abspath(os.path.curdir))

    ax5 = fig.add_subplot(326)
    # fig.figsize=(5, 10)
    log.info("assert:len:%s %s" % (len(asset.values.T - Y_hat),
                                   (asset.values.T - Y_hat)[0]))
    # distance = map(lambda x:int(x),(asset.values.T - Y_hat)/Y_hat*100)
    distance = (asset.values.T - Y_hat) / Y_hat * 100
    now_distanse = ((asset.iat[-1] - Y_hat[-1]) / Y_hat[-1] * 100)
    log.debug("dis:%s now:%s" % (distance[:2], now_distanse))
    log.debug("now_distanse:%s" % now_distanse)
    # n, bins = np.histogram(distance, 50)
    # print n, bins[:2]
    pd.Series(distance).plot(kind='hist', stacked=True, bins=100)
    # plt.plot((asset.iat[-1].T-Y_hat),'b',alpha=0.9)
    plt.axvline(now_distanse, label="1", color='red')
    # plt.axhline(now_distanse,hold=None,label="1",color='red')
    # plt.axvline(asset.iat[0],hold=None,label="1",color='red',linestyle="--")
    plt.xlabel(
        'Undervalue ------------------------------------------> Overvalue',
        fontsize=14)
    plt.ylabel('Frequency', fontsize=12)
    # plt.title('Undervalue & Overvalue Statistical Chart', fontsize=14)
    plt.legend([code, asset.iat[-1]], fontsize=12)
    plt.grid(True)

    # plt.ion()
    plt.draw()
    plt.pause(0.001)

    # plt.show(block=False)

    # plt.draw()
    # plt.pause(0.001)
    # plt.close()
    # print plt.get_backend()
    # plt.show(block=True)
    return df
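The core of the figure produced above, with the data fetching stripped away, is: fit an OLS trendline to the price series, shift it to the extreme residuals to form a value channel, and histogram the residuals to see where the latest price sits between undervalued and overvalued. A minimal sketch on synthetic prices (statsmodels and pandas assumed available, as in the original):

import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import statsmodels.api as sm

# Synthetic price series standing in for `asset`
asset = pd.Series(10 + 0.02 * np.arange(250) + np.random.randn(250) * 0.5)

X = np.arange(len(asset))
model = sm.OLS(asset.values, sm.add_constant(X)).fit()
a, b = model.params                         # intercept, slope
Y_hat = a + b * X                           # fitted value-center line

resid = asset.values - Y_hat
c_low, c_high = resid.min(), resid.max()    # extreme deviations define the channel

fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(12, 4))
ax1.plot(asset.values)
ax1.plot(X, Y_hat, 'k')
ax1.plot(X, Y_hat + c_low, 'r')
ax1.plot(X, Y_hat + c_high, 'r')
ax1.set_ylabel('Price')

ax2.hist(resid, bins=100)
ax2.axvline(resid[-1], color='red')         # where the latest price sits in the band
ax2.set_xlabel('Undervalue  <-->  Overvalue')

plt.draw()
plt.pause(0.001)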
Example #11
    def do_plot():

        if solver.iter % display == 0:
            lossC[solver.iter] = solver.net.blobs['loss3/loss3'].data.copy()
            acc1[solver.iter] = solver.net.blobs['loss3/top-1'].data.copy()
            acc5[solver.iter] = solver.net.blobs['loss3/top-5'].data.copy()

            loss_disp = 'loss3C= ' + str(lossC[solver.iter]) +  '  top-1= ' + str(acc1[solver.iter])

            print '%3d) %s' % (solver.iter, loss_disp)

            train_loss_C[solver.iter / display] = lossC[solver.iter]
            train_top1[solver.iter / display] = acc1[solver.iter]
            train_top5[solver.iter / display] = acc5[solver.iter]

            ax1.plot(it_axes[0:solver.iter / display], train_loss_C[0:solver.iter / display], 'r')
            ax2.plot(it_axes[0:solver.iter / display], train_top1[0:solver.iter / display], 'b')
            ax2.plot(it_axes[0:solver.iter / display], train_top5[0:solver.iter / display], 'c')

            ax1.set_ylim([0, 25])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)

            # VALIDATE: validation done this way only uses 1 GPU
        if solver.iter % test_interval == 0 and solver.iter > 0:
            loss_val_C = 0
            top1_val = 0
            top5_val = 0

            for i in range(test_iters):
                solver.test_nets[0].forward()
                loss_val_C += solver.test_nets[0].blobs['loss3/loss3'].data
                top1_val += solver.test_nets[0].blobs['loss3/top-1'].data
                top5_val += solver.test_nets[0].blobs['loss3/top-5'].data


            loss_val_C /= test_iters
            top1_val /= test_iters
            top5_val /= test_iters

            print("Val loss: " + str(loss_val_C))

            val_loss_C[solver.iter / test_interval - 1] = loss_val_C
            val_top1[solver.iter / test_interval - 1] = top1_val
            val_top5[solver.iter / test_interval - 1] = top5_val


            ax1.plot(it_val_axes[0:solver.iter / test_interval], val_loss_C[0:solver.iter / test_interval], 'y')
            ax2.plot(it_val_axes[0:solver.iter / test_interval], val_top1[0:solver.iter / test_interval], 'g')
            ax2.plot(it_val_axes[0:solver.iter / test_interval], val_top5[0:solver.iter / test_interval], 'k')


            ax1.set_ylim([0, 25])
            ax1.set_xlabel('iteration ' + 'Best it: ' + str(best_it[0]) + ' Best Val Loss: ' + str(int(lowest_val_loss[0])))
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)
            title = '../../../ssd2/iMaterialistFashion/models/training/' + training_id + '_' + str(solver.iter) + '.png'
            savefig(title, bbox_inches='tight')

            if loss_val_C < lowest_val_loss[0]:
                print("Best Val loss!")
                lowest_val_loss[0] = loss_val_C
                best_it[0] = solver.iter
                filename = '../../../ssd2/iMaterialistFashion/models/CNN/' + training_id + 'best_valLoss_' + str(
                    int(lowest_val_loss[0])) + '_it_' + str(best_it[0]) + '.caffemodel'
                prefix = 30
                for cur_filename in glob.glob(filename[:-prefix] + '*'):
                    print(cur_filename)
                    os.remove(cur_filename)
                solver.net.save(filename)
Example #12
def do_solve(maxIter, solver, display, test_interval, test_iters):

    import numpy as np
    from pylab import zeros, arange, subplots, plt

    # SET PLOTS DATA
    train_loss_C = zeros(maxIter / display)
    train_loss_R = zeros(maxIter / display)
    train_top1 = zeros(maxIter / display)
    train_top5 = zeros(maxIter / display)

    val_loss_C = zeros(maxIter / test_interval)
    val_loss_R = zeros(maxIter / test_interval)
    val_top1 = zeros(maxIter / test_interval)

    it_axes = (arange(maxIter) * display) + display
    it_val_axes = (arange(maxIter) * test_interval) + test_interval

    _, ax1 = subplots()
    ax2 = ax1.twinx()
    ax1.set_xlabel('iteration')
    ax1.set_ylabel(
        'train loss C (r), val loss C (y), train loss R (m), val loss R (k)')
    ax2.set_ylabel('train TOP1 (b), val TOP1 (g), train TOP-5 (2) (c)')
    ax2.set_autoscaley_on(False)
    ax2.set_ylim([0, 1])

    lossC = np.zeros(maxIter)
    lossR = np.zeros(maxIter)
    acc1 = np.zeros(maxIter)

    #RUN TRAINING
    for it in range(maxIter):
        #st = time.time()
        solver.step(1)  # run a single SGD step in Caffepy()
        #en = time.time()
        #print "Time step: " + str((en-st))

        #PLOT
        if it % display == 0 or it + 1 == maxIter:
            lossC[solver.iter] = solver.net.blobs['loss3/loss3'].data.copy()
            lossR[solver.iter] = solver.net.blobs['loss3/loss3/R'].data.copy()
            acc1[solver.iter] = solver.net.blobs['loss3/top-1'].data.copy()
            #acc5[solver.iter] = solver.net.blobs['loss2/top-5'].data.copy()

            loss_disp = 'loss3C= ' + str(
                lossC[solver.iter]) + '  loss3R= ' + str(
                    lossR[solver.iter]) + '  top-1= ' + str(acc1[solver.iter])

            print '%3d) %s' % (solver.iter, loss_disp)

            train_loss_C[solver.iter / display] = lossC[solver.iter]
            train_loss_R[solver.iter / display] = lossR[solver.iter]
            train_top1[solver.iter / display] = acc1[solver.iter]
            #train_top5[solver.iter / display] = acc5[solver.iter]

            ax1.plot(it_axes[0:solver.iter / display],
                     train_loss_C[0:solver.iter / display], 'r')
            ax1.plot(it_axes[0:solver.iter / display],
                     train_loss_R[0:solver.iter / display], 'm')
            ax2.plot(it_axes[0:solver.iter / display],
                     train_top1[0:solver.iter / display], 'b')
            # ax2.plot(it_axes[0:solver.iter / display], train_top5[0:solver.iter / display], 'c')

            ax1.set_ylim([0, 10])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)

        #VALIDATE
        if it % test_interval == 0 and it > 0:
            loss_val_R = 0
            loss_val_C = 0
            top1_val = 0
            for i in range(test_iters):
                solver.test_nets[0].forward()
                loss_val_C += solver.test_nets[0].blobs['loss3/loss3'].data
                loss_val_R += solver.test_nets[0].blobs['loss3/loss3/R'].data
                top1_val += solver.test_nets[0].blobs['loss3/top-1'].data

            loss_val_C /= test_iters
            loss_val_R /= test_iters
            top1_val /= test_iters

            print("Val loss C: {:.3f}".format(loss_val_C))

            val_loss_C[solver.iter / test_interval - 1] = loss_val_C
            val_loss_R[solver.iter / test_interval - 1] = loss_val_R
            val_top1[solver.iter / test_interval - 1] = top1_val

            ax1.plot(it_val_axes[0:solver.iter / test_interval],
                     val_loss_C[0:solver.iter / test_interval], 'y')
            ax1.plot(it_val_axes[0:solver.iter / test_interval],
                     val_loss_R[0:solver.iter / test_interval], 'k')
            ax2.plot(it_val_axes[0:solver.iter / test_interval],
                     val_top1[0:solver.iter / test_interval], 'g')

            ax1.set_ylim([0, 10])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)

    return
Example #13
def do_solve(niter, solvers, disp_interval, test_interval, test_iters,
             training_id, batch_size):

    import tempfile
    import numpy as np
    import os
    from pylab import zeros, arange, subplots, plt, savefig
    import time

    # SET PLOTS DATA
    train_loss = zeros(niter / disp_interval)
    train_correct_pairs = zeros(niter / disp_interval)

    val_loss = zeros(niter / test_interval)
    val_correct_pairs = zeros(niter / test_interval)

    it_axes = (arange(niter) * disp_interval) + disp_interval
    it_val_axes = (arange(niter) * test_interval) + test_interval

    _, ax1 = subplots()
    ax2 = ax1.twinx()
    ax1.set_xlabel('iteration')
    ax1.set_ylabel('train loss (r), val loss (g)')
    ax2.set_ylabel('train correct pairs (b) val correct pairs (m)')
    ax2.set_autoscaley_on(False)
    ax2.set_ylim([0, batch_size])

    loss = {name: np.zeros(niter) for name, _ in solvers}
    correct_pairs = {name: np.zeros(niter) for name, _ in solvers}

    #RUN TRAINING
    for it in range(niter):
        for name, s in solvers:
            # start = time.time()
            s.step(1)  # run a single SGD step in Caffe
            # end = time.time()
            # print "Time step: " + str((end - start))
            # print "Max before ReLU: " + str(np.max(s.net.blobs['inception_5b/pool_proj'].data))
            # print "Max last FC: " + str(np.max(s.net.blobs['loss3/classifierCustom'].data))

            loss[name][it] = s.net.blobs['loss3/loss3'].data.copy()
            correct_pairs[name][it] = s.net.blobs['correct_pairs'].data.copy()

        #PLOT
        if it % disp_interval == 0 or it + 1 == niter:
            loss_disp = 'loss=' + str(
                loss['my_solver'][it]) + ' correct_pairs=' + str(
                    correct_pairs['my_solver'][it])

            print '%3d) %s' % (it, loss_disp)

            train_loss[it / disp_interval] = loss['my_solver'][it]
            train_correct_pairs[it /
                                disp_interval] = correct_pairs['my_solver'][it]

            ax1.plot(it_axes[0:it / disp_interval],
                     train_loss[0:it / disp_interval], 'r')
            ax2.plot(it_axes[0:it / disp_interval],
                     train_correct_pairs[0:it / disp_interval], 'b')

            # if it > test_interval:
            #     ax1.plot(it_val_axes[0:it/test_interval], val_loss[0:it/test_interval], 'g') #Val always on top
            ax1.set_ylim([0, 0.05])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)
            # title = '../training/numbers/training-' + str(it) + '.png'  # Save graph to disk
            # savefig(title, bbox_inches='tight')

        #VALIDATE
        if it % test_interval == 0 and it > 0:
            loss_val = 0
            cur_correct_pairs = 0
            for i in range(test_iters):
                solvers[0][1].test_nets[0].forward()
                loss_val += solvers[0][1].test_nets[0].blobs[
                    'loss3/loss3'].data
                cur_correct_pairs += solvers[0][1].test_nets[0].blobs[
                    'correct_pairs'].data

            loss_val /= test_iters
            cur_correct_pairs /= test_iters

            print("Val loss: " + str(loss_val) + " Val correct pairs: " +
                  str(cur_correct_pairs))

            val_loss[it / test_interval - 1] = loss_val
            val_correct_pairs[it / test_interval - 1] = cur_correct_pairs

            ax1.plot(it_val_axes[0:it / test_interval],
                     val_loss[0:it / test_interval], 'g')
            ax2.plot(it_val_axes[0:it / test_interval],
                     val_correct_pairs[0:it / test_interval], 'm')
            ax1.set_ylim([0, 0.05])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)
            title = '../../../hd/datasets/landmarks_recognition/models/training/' + training_id + str(
                it) + '.png'  # Save graph to disk
            savefig(title, bbox_inches='tight')

    #Save the learned weights from both nets at the end of the training
    weight_dir = tempfile.mkdtemp()
    weights = {}
    for name, s in solvers:
        filename = 'weights.%s.caffemodel' % name
        weights[name] = os.path.join(weight_dir, filename)
        s.net.save(weights[name])

    return loss, weights
Example #14
def do_solve(niter, solver, disp_interval, test_interval, test_iters, training_id):
    """Run solvers for niter iterations,
       returning the loss and recorded each iteration.
       `solvers` is a list of (name, solver) tuples."""

    import tempfile
    import numpy as np
    import os
    from pylab import zeros, arange, subplots, plt, savefig
    import time
    import glob

    # SET PLOTS DATA
    train_loss = zeros(niter/disp_interval)
    val_loss = zeros(niter/test_interval)

    it_axes = (arange(niter) * disp_interval) + disp_interval
    it_val_axes = (arange(niter) * test_interval) + test_interval

    _, ax1 = subplots()
    # ax2 = ax1.twinx()
    ax1.set_xlabel('iteration')
    ax1.set_ylabel('train loss (r), val loss (g)')
    # ax2.set_ylabel('val loss (g)')
    # ax2.set_autoscaley_on(False)
    # ax2.set_ylim([0, 1])

    lowest_val_loss = 1000
    best_it = 0
    loss = np.zeros(niter)


    #RUN TRAINING
    for it in range(niter):
        # start = time.time()
        solver.step(1)  # run a single SGD step in Caffe
        # end = time.time()
        # print "Time step: " + str((end - start))
        loss[it] = solver.net.blobs['loss3/loss3'].data.copy()

        #PLOT
        if it % disp_interval == 0 or it + 1 == niter:
            loss_disp = 'loss=' + str(loss[it])

            print '%3d) %s' % (it, loss_disp)

            train_loss[it/disp_interval] = loss[it]

            ax1.plot(it_axes[0:it/disp_interval], train_loss[0:it/disp_interval], 'r')
            # if it > test_interval:
            #     ax1.plot(it_val_axes[0:it/test_interval], val_loss[0:it/test_interval], 'g') #Val always on top
            ax1.set_ylim([int(lowest_val_loss) - 1,int(lowest_val_loss) + 4])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)
            # title = '../training/numbers/training-' + str(it) + '.png'  # Save graph to disk
            # savefig(title, bbox_inches='tight')

        #VALIDATE
        if it % test_interval == 0 and it > 0:
            loss_val = 0
            for i in range(test_iters):
                solver.test_nets[0].forward()
                loss_val += solver.test_nets[0].blobs['loss3/loss3'].data
            loss_val /= test_iters
            print("Val loss: {:.3f}".format(loss_val))

            val_loss[it/test_interval - 1] = loss_val
            ax1.plot(it_val_axes[0:it/test_interval], val_loss[0:it/test_interval], 'g')
            ax1.set_ylim([int(lowest_val_loss) - 1,int(lowest_val_loss) + 4])
            ax1.set_xlabel('iteration ' + 'Best it: ' + str(best_it) + ' Best Val Loss: ' + str(int(lowest_val_loss)))
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)
            title = '../../../hd/datasets/instaMiro/models/training/' + training_id + str(it) + '.png'  # Save graph to disk
            savefig(title, bbox_inches='tight')

            if loss_val < lowest_val_loss:
                print("Best Val loss!")
                lowest_val_loss = loss_val
                best_it = it
                filename = '../../../hd/datasets/instaMiro/models/CNNRegression/' + training_id + '_best_valLoss_' + str(int(loss_val)) +'_it_' + str(it) + '.caffemodel'
                prefix = 30
                for cur_filename in glob.glob(filename[:-prefix] + '*'):
                    print(cur_filename)
                    os.remove(cur_filename)
                solver.net.save(filename)
Example #15
    if plot:
        ax1.plot(it_axes[0:epoch + 1], plot_data['train_loss'][0:epoch + 1],
                 'r')
        ax2.plot(it_axes[0:epoch + 1],
                 plot_data['train_correct_pairs'][0:epoch + 1], 'b')

        ax1.plot(it_axes[0:epoch + 1], plot_data['val_loss'][0:epoch + 1], 'y')
        ax2.plot(it_axes[0:epoch + 1],
                 plot_data['val_correct_pairs'][0:epoch + 1], 'g')

        plt.title(training_id + str(round(variance, 4)), fontsize=10)
        plt.ion()
        plt.grid(True)
        plt.show()
        plt.pause(0.001)

        # Save graph to disk
        if epoch % 1 == 0 and epoch != 0:
            title = dataset + '/training/' + training_id + '_epoch_' + str(
                epoch) + '_var_' + str(round(variance, 4)) + '.png'
            savefig(title, bbox_inches='tight')

    variance += variance_step

print("Finished Training, saving checkpoint")
filename = dataset + '/models/' + training_id + '_epoch_' + str(epoch)
prefix_len = len('_epoch_' + str(epoch) + '_ValLoss_' +
                 str(round(plot_data['val_loss'][epoch], 2)))
train.save_checkpoint(model, filename, prefix_len)
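train.save_checkpoint is project-specific and not shown; a hedged sketch of what such a helper might do, reusing the delete-older-checkpoints-by-prefix pattern seen in the Caffe examples in this section (PyTorch assumed, filename and prefix handling illustrative only):

import glob
import os
import torch


def save_checkpoint(model, filename, prefix_len):
    """Save model weights, first removing older checkpoints that share the same prefix."""
    assert prefix_len > 0  # guard: an empty prefix slice would glob everything
    for old_file in glob.glob(filename[:-prefix_len] + '*'):
        os.remove(old_file)
    torch.save(model.state_dict(), filename + '.pth')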
Example #16
def do_solve(niter, solver, disp_interval, test_interval, test_iters,
             training_id, batch_size):
    """Run solvers for niter iterations,
       returning the loss and recorded each iteration.
       `solvers` is a list of (name, solver) tuples."""

    import tempfile
    import numpy as np
    import os
    from pylab import zeros, arange, subplots, plt, savefig
    import glob
    import time

    # SET PLOTS DATA
    # train_loss = zeros(niter/disp_interval)
    train_loss_r = zeros(niter / disp_interval)
    train_correct_pairs = zeros(niter / disp_interval)
    # train_acc = zeros(niter/disp_interval)

    # val_loss = zeros(niter/test_interval)
    val_loss_r = zeros(niter / test_interval)
    val_correct_pairs = zeros(niter / test_interval)
    # val_acc = zeros(niter/test_interval)

    it_axes = (arange(niter) * disp_interval) + disp_interval
    it_val_axes = (arange(niter) * test_interval) + test_interval

    _, ax1 = subplots()
    ax2 = ax1.twinx()
    ax1.set_xlabel('iteration')
    ax1.set_ylabel(
        'train loss (r), val loss (g),')  # train loss_r (c), val loss_r (o)')
    ax2.set_ylabel('train correct pairs (b) val correct pairs (m)'
                   )  # train top1 (y) val top1 (bk)')
    ax2.set_autoscaley_on(False)
    ax2.set_ylim([0, batch_size])

    # loss = {name: np.zeros(niter) for name, _ in solvers}
    loss_r = np.zeros(niter)
    correct_pairs = np.zeros(niter)
    # acc = {name: np.zeros(niter) for name, _ in solvers}

    lowest_val_loss = 1000
    best_it = 0

    #RUN TRAINING
    for it in range(niter):
        # start = time.time()
        solver.step(1)  # run a single SGD step in Caffe
        # end = time.time()
        # print "Time step: " + str((end - start))
        # print "Max before ReLU: " + str(np.max(s.net.blobs['inception_5b/pool_proj'].data))
        # print "Max last FC: " + str(np.max(s.net.blobs['loss3/classifierCustom'].data))

        #loss[name][it] = s.net.blobs['loss3/loss3/classification'].data.copy()
        loss_r[it] = solver.net.blobs['loss3/loss3/ranking'].data.copy()
        correct_pairs[it] = solver.net.blobs['correct_pairs'].data.copy()
        # acc[name][it] = s.net.blobs['loss3/top-1'].data.copy()

        #PLOT
        if it % disp_interval == 0 or it + 1 == niter:
            # loss_disp = 'loss=' + str(loss['my_solver'][it]) + ' correct_pairs=' + str(correct_pairs['my_solver'][it]) + ' loss ranking=' + str(loss_r['my_solver'][it])
            loss_disp = ' correct_pairs=' + str(
                correct_pairs[it]) + ' loss ranking=' + str(loss_r[it])

            print '%3d) %s' % (it, loss_disp)

            # train_loss[it/disp_interval] = loss[it]
            train_loss_r[it / disp_interval] = loss_r[it]
            train_correct_pairs[it / disp_interval] = correct_pairs[it]
            # train_acc[it/disp_interval] = acc[it] *120

            # ax1.plot(it_axes[0:it/disp_interval], train_loss[0:it/disp_interval], 'r')
            ax1.plot(it_axes[0:it / disp_interval],
                     train_loss_r[0:it / disp_interval], 'c')
            ax2.plot(it_axes[0:it / disp_interval],
                     train_correct_pairs[0:it / disp_interval], 'b')
            # ax2.plot(it_axes[0:it/disp_interval], train_acc[0:it/disp_interval], 'gold')

            # if it > test_interval:
            #     ax1.plot(it_val_axes[0:it/test_interval], val_loss[0:it/test_interval], 'g') #Val always on top
            ax1.set_ylim([0, 2])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)
            # title = '../training/numbers/training-' + str(it) + '.png'  # Save graph to disk
            # savefig(title, bbox_inches='tight')

        #VALIDATE
        if it % test_interval == 0 and it > 0:
            # loss_val = 0
            loss_val_r = 0
            cur_correct_pairs = 0
            # cur_acc = 0

            for i in range(test_iters):
                solver.test_nets[0].forward()
                # loss_val += solver.test_nets[0].blobs['loss3/loss3/classification'].data
                loss_val_r += solver.test_nets[0].blobs[
                    'loss3/loss3/ranking'].data
                cur_correct_pairs += solver.test_nets[0].blobs[
                    'correct_pairs'].data
                # cur_acc += solvers[0][1].test_nets[0].blobs['loss3/top-1'].data

            # loss_val /= test_iters
            loss_val_r /= test_iters
            cur_correct_pairs /= test_iters
            # cur_acc /= test_iters
            # cur_acc *= 120

            # print("Val loss: " + str(loss_val) + " Val correct pairs: " + str(cur_correct_pairs) + " Val loss ranking: " + str(loss_val_r) + "Val acc: "+ str(cur_acc))
            print(" Val correct pairs: " + str(cur_correct_pairs) +
                  " Val loss ranking: " + str(loss_val_r))

            # val_loss[it/test_interval - 1] = loss_val
            val_loss_r[it / test_interval - 1] = loss_val_r
            val_correct_pairs[it / test_interval - 1] = cur_correct_pairs
            # val_acc[it/test_interval - 1] = cur_acc

            # ax1.plot(it_val_axes[0:it/test_interval], val_loss[0:it/test_interval], 'g')
            ax1.plot(it_val_axes[0:it / test_interval],
                     val_loss_r[0:it / test_interval], 'orange')
            ax2.plot(it_val_axes[0:it / test_interval],
                     val_correct_pairs[0:it / test_interval], 'm')
            # ax2.plot(it_val_axes[0:it/test_interval], val_acc[0:it/test_interval], 'k')
            ax1.set_ylim([0, 2])
            ax1.set_xlabel('iteration ' + 'Best it: ' + str(best_it) +
                           ' Best Val Loss: ' + str(int(lowest_val_loss)))
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)
            title = '../../../hd/datasets/instaFashion/models/training/' + training_id + str(
                it) + '.png'  # Save graph to disk
            savefig(title, bbox_inches='tight')

            if loss_val_r < lowest_val_loss:
                print("Best Val loss!")
                lowest_val_loss = loss_val_r
                best_it = it
                filename = '../../../hd/datasets/instaFashion/models/CNNContrastive/' + training_id + 'best_valLoss_' + str(
                    int(loss_val_r)) + '_it_' + str(it) + '.caffemodel'
                prefix = 30
                for cur_filename in glob.glob(filename[:-prefix] + '*'):
                    print(cur_filename)
                    os.remove(cur_filename)
                solver.net.save(filename)
Example #17
def do_solve(niter, solvers, disp_interval, test_interval, test_iters,
             training_id):
    """Run solvers for niter iterations,
       returning the loss and recorded each iteration.
       `solvers` is a list of (name, solver) tuples."""

    import tempfile
    import numpy as np
    import os
    from pylab import zeros, arange, subplots, plt, savefig
    import time

    # SET PLOTS DATA
    train_loss = zeros(niter / disp_interval)
    val_loss = zeros(niter / test_interval)

    it_axes = (arange(niter) * disp_interval) + disp_interval
    it_val_axes = (arange(niter) * test_interval) + test_interval

    _, ax1 = subplots()
    ax1.set_xlabel('iteration')
    ax1.set_ylabel('train loss (r), val loss (g)')
    loss = {name: np.zeros(niter) for name, _ in solvers}

    #RUN TRAINING
    for it in range(niter):
        for name, s in solvers:
            s.step(1)  # run a single SGD step in Caffe
            loss[name][it] = s.net.blobs['loss3/loss3'].data.copy()

        #PLOT
        if it % disp_interval == 0 or it + 1 == niter:
            loss_disp = 'loss=' + str(loss['my_solver'][it])

            print '%3d) %s' % (it, loss_disp)

            train_loss[it / disp_interval] = loss['my_solver'][it]

            ax1.plot(it_axes[0:it / disp_interval],
                     train_loss[0:it / disp_interval], 'r')
            ax1.set_ylim([170, 210])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.0001)

        #VALIDATE
        if it % test_interval == 0 and it > 0:
            loss_val = 0
            for i in range(test_iters):
                solvers[0][1].test_nets[0].forward()
                loss_val += solvers[0][1].test_nets[0].blobs[
                    'loss3/loss3'].data
            loss_val /= test_iters
            print("Val loss: {:.3f}".format(loss_val))

            val_loss[it / test_interval - 1] = loss_val
            ax1.plot(it_val_axes[0:it / test_interval],
                     val_loss[0:it / test_interval], 'g')
            ax1.set_ylim([170, 210])
            plt.title(training_id)
            plt.ion()
            plt.grid(True)
            plt.show()
            plt.pause(0.001)
            title = '../../../hd/datasets/instaBarcelona/models/training/' + training_id + str(
                it) + '.png'
            savefig(title, bbox_inches='tight')

    #Save the learned weights from both nets at the end of the training
    weight_dir = tempfile.mkdtemp()
    weights = {}
    for name, s in solvers:
        filename = 'weights.%s.caffemodel' % name
        weights[name] = os.path.join(weight_dir, filename)
        s.net.save(weights[name])

    return loss, weights
Example #18
    fifteen_ago = time.time() - datetime.timedelta(minutes=20).total_seconds()
    relevant_data = [(t, n) for t, n in grouped_data.items() if float(t) > fifteen_ago]

    update_graph([format_timestamp(t) for t, n in relevant_data], [n for t, n in relevant_data])


def update_graph(times, nums):
    plt.xlim(max(times) - datetime.timedelta(minutes=15), max(times))
    plt.ylim(0, max(nums) * 1.2)
    plt.plot_date(times, nums, "ro")
    plt.draw()


path = "datastore.db"
time_checked = time.time() - 3000
size = os.path.getmtime(path)
plt.ion()
refresh()
formatter = DateFormatter("%H:%M")
plt.gcf().axes[0].xaxis.set_major_formatter(formatter)
while True:
    try:
        if os.path.getmtime(path) > size:
            refresh()
            size = os.path.getmtime(path)
            plt.pause(10)
        else:
            plt.pause(1)
    except TclError:  # Can't sleep due to graph being closed
        break
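The surrounding loop is the generic "poll a file, redraw when it changes" pattern: plt.ion() keeps the window interactive, plt.pause() both sleeps and services GUI events, and a TclError signals that the window was closed (TkAgg backend). A minimal self-contained version of that skeleton; the `load_points` reader and the datastore.db path are placeholder assumptions:

import os
import matplotlib.pyplot as plt
from tkinter import TclError  # raised when the figure window is closed (TkAgg backend)

PATH = "datastore.db"


def load_points(path):
    # Placeholder: return (xs, ys) parsed from the data store
    return [0, 1, 2], [0, 1, 4]


plt.ion()
mtime = os.path.getmtime(PATH)
while True:
    try:
        if os.path.getmtime(PATH) > mtime:
            mtime = os.path.getmtime(PATH)
            xs, ys = load_points(PATH)
            plt.cla()
            plt.plot(xs, ys, "ro")
            plt.draw()
        plt.pause(1)  # sleep and process GUI events
    except TclError:
        break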