Code example #1
File: equal4.py Project: ychnlgy/LipoWithGradients
    def train_evalnet(self, evalnet, X, Y):
        print("Evaluation network data size: %d" % X.size(0))
        
        dataset = torch.utils.data.TensorDataset(X, Y)
        dataloader = torch.utils.data.DataLoader(dataset, batch_size=8, shuffle=True)
        epochs = 200
        lossf = torch.nn.MSELoss()
        optim = torch.optim.SGD(evalnet.parameters(), lr=0.01, momentum=0.9)
        sched = torch.optim.lr_scheduler.MultiStepLR(optim, milestones=[80, 160])

        bar = tqdm.tqdm(range(epochs), ncols=80)
        avg = MovingAverage(momentum=0.95)
        
        for epoch in bar:
            for Xb, Yb in dataloader:
                Xb = self.create_normal(Xb)
                Yb = self.create_normal(Yb)
                Yh = evalnet(Xb).squeeze()

                loss = lossf(Yh, Yb)
                full_loss = loss + self.grad_penalty(evalnet, X, Xb)
                optim.zero_grad()
                full_loss.backward()
                optim.step()
            
            sched.step()
            avg.update(loss.item())
            bar.set_description("Fitting evalnet: %.3f" % avg.peek())
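The snippet above smooths the reported loss with MovingAverage(momentum=0.95) via update() and peek(); the class itself is not part of this listing. A minimal sketch of an exponential moving average tracker with that interface, assuming the usual momentum-weighted update (hypothetical, not the project's actual code):

class MovingAverage:
    """Hypothetical exponential moving average matching the update()/peek() usage above."""

    def __init__(self, momentum=0.9):
        self.momentum = momentum
        self.value = None

    def update(self, x):
        # keep `momentum` of the previous value and blend in the new observation
        self.value = x if self.value is None else self.momentum * self.value + (1 - self.momentum) * x

    def peek(self):
        return self.value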
Code example #2
File: LLE.py Project: GMusk/lowlightvideo
    def __init__(self, args):

        input_path = args['input']
        if not os.path.exists(input_path):
            raise FileNotFoundError(f'{input_path} does not exist')

        # get capture object
        self.cap = self.get_video(input_path)
        # get total number of frames
        self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        # get buffer size
        self.buffer_size = args['buffer']
        # handle size parameter
        self.handle_size(args['size'])
        # get name of file
        filename = self.get_filename(input_path)
        # get output
        output = args['output']
        # get final dir
        self.output_dir = output + filename + '/'
        # create dictionary of video writers
        self.input_videos = {}
        self.video_writers = {}
        # determine options for program
        self.handle_option(args['option'], args['read'])
        # initiate average and edit class
        self.stabiliser = Stabiliser(self.total_frames, self.size)
        self.ma = MovingAverage(self.size, False, False, args['buffer'])
        self.fe = FrameEditor()
        self.plt = Plotter(self.output_dir)
        # create the background subtraction model (MOG2, a mixture-of-Gaussians method)
        self.backSub = cv2.createBackgroundSubtractorMOG2()
Code example #3
File: model.py Project: ychnlgy/ToyKinematicsDataset
    def __init__(self, D):
        super().__init__(D)
        self.target_modules = [torch.nn.Linear]
        self.gain_rate = 0.01
        self.loss_rate = 0.05

        self.epochs = 100
        self.batch = 8
        self.score = MovingAverage(momentum=0.90)
        self.train_score = MovingAverage(momentum=0.9)
Code example #4
    def __init__(self, data, fastType, fastWindow, fastGradWindow, slowType,
                 slowWindow, slowGradWindow, spreadGradWindow):
        self.data = data.fillna(method='bfill', axis=1)
        self.x = self.data[self.data.columns[0]].tolist()
        self.dates = self.data.index.tolist()
        self.height = len(self.data)
        self.spreadGradWindow = spreadGradWindow
        self.TodayStatics = {}

        self.slowType = slowType
        self.slowWindow = slowWindow
        self.slowGradWindow = slowGradWindow
        self.slowMAObj = MovingAverage(self.data,
                                       self.slowType,
                                       slowWindow,
                                       gradWindow=slowGradWindow)
        self.slowMA = self.slowMAObj.data['MA'].tolist()

        self.fastType = fastType
        self.fastWindow = fastWindow
        self.fastGradWindow = fastGradWindow
        self.fastMAObj = MovingAverage(self.data,
                                       self.fastType,
                                       fastWindow,
                                       gradWindow=fastGradWindow)
        self.fastMA = self.fastMAObj.data['MA'].tolist()
        self.tradeSignal = self.CalcCrossOverScore()
        self.spread = self.CalcSpread()
        self.CalcSpreadGradient()  # This will set all gradient data
        self.tradeDaySeries = self.CalcTradeDaySeries()
        self.TodayStatics['todayMAslow'] = self.slowMA[-1]  # today's level of the slow MA
        self.TodayStatics['todayMAfast'] = self.fastMA[-1]  # today's level of the fast MA
        self.TodayStatics['todayCrossScore'] = self.tradeSignal[-1]
        self.TodayStatics['todaySpread'] = self.spread[-1]
        self.TodayStatics['todayTradeDays'] = self.tradeDaySeries[-1]
        self.preCrossOver = self.CalcLastCrossOver()
        self.CalcTradeDayZscores()  # Calc Z-score of Trading days
        if (fastWindow > slowWindow):
            raise Exception('Parameter Error : fast MA is larger than slow MA')
        self.CalcHiLoInCurrentTrade()  # calculate the price hi/lo during the current trade
Code example #5
File: LLE.py Project: GMusk/lowlightvideo
class LowLightEnhancer:
    """A class for enhancing low light video files

    The LowLightEnhancer class is used to enhance low light video using a logarithmic expansion of frames averaged over some buffer size

    """
    def __init__(self, args):

        input_path = args['input']
        if not os.path.exists(input_path):
            raise FileNotFoundError(f'{input_path} does not exist')

        # get capture object
        self.cap = self.get_video(input_path)
        # get total number of frames
        self.total_frames = int(self.cap.get(cv2.CAP_PROP_FRAME_COUNT))
        # get buffer size
        self.buffer_size = args['buffer']
        # handle size parameter
        self.handle_size(args['size'])
        # get name of file
        filename = self.get_filename(input_path)
        # get output
        output = args['output']
        # get final dir
        self.output_dir = output + filename + '/'
        # create dictionary of video writers
        self.input_videos = {}
        self.video_writers = {}
        # determine options for program
        self.handle_option(args['option'], args['read'])
        # initiate average and edit class
        self.stabiliser = Stabiliser(self.total_frames, self.size)
        self.ma = MovingAverage(self.size, False, False, args['buffer'])
        self.fe = FrameEditor()
        self.plt = Plotter(self.output_dir)
        # create the background subtraction model (MOG2, a mixture-of-Gaussians method)
        self.backSub = cv2.createBackgroundSubtractorMOG2()

    def release_video(self):
        for writer in self.video_writers.values():
            writer.release()

    def handle_size(self, size):
        if size is not None:
            # store as (height, width) to match the capture-derived default below
            self.size = (size[1], size[0])
        else:
            # fall back to the capture's original (height, width)
            self.size = (int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                         int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH)))

    def get_filename(self, path):
        path_split = path.split('/')
        filename = path_split[-1].split('.')[0]
        return filename

    def handle_option(self, opt, read):
        if not os.path.exists(self.output_dir):
            os.mkdir(self.output_dir)
        cur_filename = self.output_dir + "input30.mp4"
        self.video_writers["input"] = self.create_vwriter(cur_filename)
        for char in opt:
            if char == 's':
                cur_filename = self.output_dir + "stable.mp4"
                if read:
                    cap = self.get_video(cur_filename)
                    if cap:
                        print("input found")
                        self.input_videos["stable"] = cap
                        continue
                self.video_writers["stable"] = self.create_vwriter(
                    cur_filename)
            elif char == 'e':
                cur_filename = self.output_dir + "expand.mp4"
                if read:
                    cap = self.get_video(cur_filename)
                    if cap:
                        print("input found")
                        self.input_videos["expand"] = cap
                        continue
                self.video_writers["expand"] = self.create_vwriter(
                    cur_filename)
            elif char == 'm':
                cur_filename = self.output_dir + "motion.mp4"
                if read:
                    cap = self.get_video(cur_filename)
                    if cap:
                        print("input found")
                        self.input_videos["motion"] = cap
                        continue
                self.video_writers["motion"] = self.create_vwriter(
                    cur_filename)
            elif char == 'c':
                cur_filename = self.output_dir + "closure.mp4"
                if read:
                    cap = self.get_video(cur_filename)
                    if cap:
                        print("input found")
                        self.input_videos["closure"] = cap
                        continue
                self.video_writers["closure"] = self.create_vwriter(
                    cur_filename)
            elif char == 'a':
                cur_filename = self.output_dir + "average.mp4"
                self.video_writers["average"] = self.create_vwriter(
                    cur_filename)
                cur_filename = self.output_dir + "contribution.mp4"
                self.video_writers["contribution"] = self.create_vwriter(
                    cur_filename)

    def create_vwriter(self, filename):
        # set codec for written video
        fourcc = cv2.VideoWriter_fourcc(*"mp4v")
        fps = int(self.cap.get(cv2.CAP_PROP_FPS))
        video_writer = cv2.VideoWriter(filename, fourcc, fps, self.size[::-1])
        return video_writer

    def get_video(self, path):
        cap = cv2.VideoCapture(path)
        if not cap.isOpened():
            print("No Video Found")
            return None
        else:
            return cap

    def enough_frames(self):
        return True

    def enhance(self):
        """Read video, perform enhancement, & write enhanced video to file
        """

        # first pass
        print("first pass")
        if "stable" not in self.input_videos and "stable" in self.video_writers:
            for i in tqdm(range(self.total_frames)):

                # Capture frame-by-frame
                ret, frame = self.cap.read()

                # check capture
                if not ret:
                    print("video finished")
                    break

                self.stabiliser.get_transform(frame, i)

        # stabilise
        traj, smooth = self.stabiliser.get_trajectory()

        # plot trajectories
        self.plt.plot_trajectory(traj, smooth)

        # Reset stream to first frame
        self.cap.set(cv2.CAP_PROP_POS_FRAMES, 0)

        # default mask value
        mask_fg = np.ones((self.size), dtype=np.uint8) * 255

        finished = False
        i = 0

        print("second pass")
        while not finished:

            # Capture frame-by-frame
            ret, frame = self.cap.read()

            # check capture before writing: frame is None once the stream ends
            if not ret:
                print("video finished")
                break

            initial = frame
            self.video_writers["input"].write(initial)

            # stabilise video
            if "stable" in self.video_writers:
                stable = self.stabiliser.get_stable_frame(frame, i)
                frame = stable
                self.video_writers["stable"].write(stable)
            # check if reading from file
            elif "stable" in self.input_videos:
                ret, frame = self.input_videos["stable"].read()

            # get foreground mask
            if "motion" in self.video_writers:
                fg = self.backSub.apply(frame)
                _, thresh = cv2.threshold(fg, 127, 255, cv2.THRESH_BINARY_INV)
                mask_fg = thresh
                # convert to 3 channels for output
                mask_fg_3 = cv2.cvtColor(fg, cv2.COLOR_GRAY2BGR)
                self.video_writers["motion"].write(mask_fg_3)

                if "closure" in self.video_writers:
                    # morphological closing to fill small holes in the mask
                    close = cv2.morphologyEx(mask_fg, cv2.MORPH_CLOSE,
                                             np.ones((3, 3), np.uint8))
                    close_3 = cv2.cvtColor(close, cv2.COLOR_GRAY2BGR)
                    self.video_writers["closure"].write(close_3)
                    mask_fg = close

            # apply image operations here: takes a uint8 frame, returns a uint8 frame
            if "expand" in self.video_writers:
                edit_frame = self.fe.doOperation(frame)
                self.video_writers["expand"].write(edit_frame)
                frame = edit_frame

            # store frame in moving average
            if "average" in self.video_writers:

                # get stationary contribution from frame
                contribution = cv2.bitwise_and(frame, frame, mask=mask_fg)

                av_frame = self.ma.add(contribution, mask_fg)
                if av_frame is not None:

                    av_frame = np.uint8(av_frame)
                    contribution = np.uint8(contribution)

                    self.video_writers["contribution"].write(contribution)
                    self.video_writers["average"].write(av_frame)

                    if self.enough_frames():
                        self.plt.plot_histogram(initial, "input", True)
                        cv2.imwrite(self.output_dir + "input_frame" + ".png",
                                    initial)
                        cv2.imwrite(
                            self.output_dir + "expanded_frame" + ".png",
                            edit_frame)
                        self.plt.plot_histogram(av_frame, "final", True)
                        cv2.imwrite(self.output_dir + "final_frame" + ".png",
                                    av_frame)
                        if "motion" in self.video_writers:
                            cv2.imwrite(self.output_dir + "mask" + ".png",
                                        close_3)
                        finished = True
            i += 1

            # key to break playback
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        # When everything is done, release the capture
        self.release_video()
        self.cap.release()
        cv2.destroyAllWindows()
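The class docstring above describes the enhancement as a logarithmic expansion of each frame averaged over a buffer; the FrameEditor and MovingAverage used for this are defined elsewhere in the project. A minimal, hypothetical sketch of that idea (assumed formulas, not the project's implementation):

import numpy as np

def log_expand(frame):
    # brighten dark regions: map [0, 255] through log(1 + x) and rescale back to uint8
    f = frame.astype(np.float32)
    return (np.log1p(f) / np.log(256.0) * 255.0).astype(np.uint8)

def buffer_average(frames):
    # plain mean over a buffer of enhanced frames (e.g. the last buffer_size frames)
    return np.mean(np.stack(frames), axis=0).astype(np.uint8)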
Code example #6
    def test_MovingAverageComputesCorrectlyForLength3(self):
        l = [0, 2, 0]
        expected = [2 / 3]
        result = MovingAverage().EvenlySpaced(l, 3)
        self.assertListEqual(expected, result)
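Together with test_MovingAverageHasLessElements further down, this test suggests EvenlySpaced is a plain sliding-window mean: a window of length 3 over [0, 2, 0] yields [2 / 3]. A minimal sketch consistent with those tests (hypothetical; the real implementation is not shown here):

def evenly_spaced(values, window):
    # mean over every full window; the result has len(values) - window + 1 entries
    return [sum(values[i:i + window]) / window for i in range(len(values) - window + 1)]

# evenly_spaced([0, 2, 0], 3) -> [0.666...]
# evenly_spaced([1, 2, 3], 2) -> [1.5, 2.5]  (one element shorter than the input)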
Code example #7
File: plotGraph.py Project: holgerBerger/ludalo
def plotGraph(list_of_list, diagramName='', mvaLength=21):
    ''' Expects [[xValues], [yValues], ...] pairs.
        Plots each value series together with its moving-average-filtered line.
    '''
    if len(list_of_list) % 2 != 0:
        raise ArgMismatch('Please provide [[xValues],[yValues], ...] as arguments')

    time_start = time.time()
#------------------------------------------------------------------------------

    oneHouer = 60 * 60
    quater_day = oneHouer * 6
    half_a_day = oneHouer * 12
    oneDay = oneHouer * 24
    oneWeek = oneDay * 7
    oneMonth = oneWeek * 4
    half_a_Year = oneMonth * 6
    one_Year = half_a_Year * 2
    tmin = 0

    now = time.mktime(time.localtime())

    for i in range(0, len(list_of_list), 2):
        # ---- Calc filtered values ------
        list_length = len(list_of_list[i])
        if list_length < mvaLength:
            mva = MovingAverage(list_length)
        else:
            mva = MovingAverage(mvaLength)
        for j in  range(0, len(list_of_list[i])):
            mva.addValue(list_of_list[i][j], list_of_list[i+1][j])

        filterd_values = mva.getAverage()
        fvTimes = []
        fvValues = []
        for item in filterd_values:
            fvTimes.append(item[0])
            fvValues.append(item[1])

        # ---- Append org values
        dates = [dt.datetime.fromtimestamp(ts) for ts in list_of_list[i]]
        datenums = md.date2num(dates)
        ax = plt.gca()
        xfmt = md.DateFormatter('%Y-%m-%d %H:%M')
        ax.xaxis.set_major_formatter(xfmt)
        plt.plot(datenums, list_of_list[i + 1], lw=1, color='gray')

        # ---- Append filtered values ------
        dates = [dt.datetime.fromtimestamp(ts) for ts in fvTimes]
        datenums = md.date2num(dates)
        tmin = min(datenums)
        ax = plt.gca()
        xfmt = md.DateFormatter('%Y-%m-%d %H:%M')
        ax.xaxis.set_major_formatter(xfmt)
        plt.plot(datenums, fvValues, lw=2)

    plt.subplots_adjust(bottom=0.2)
    plt.xticks(rotation=90)
    ax.grid(True)
    plt.title(diagramName)
    #plt.text(tmin, -300, r'Read')
    #plt.text(tmin, +200, r'Write')
    plt.xlabel('Time')
    plt.ylabel('Speed [MB/s]')
    plt.savefig('plt/' + str(diagramName) + '.png')
    plt.close('all')
#------------------------------------------------------------------------------
    time_end = time.time()
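A hypothetical invocation of plotGraph, assuming Unix-timestamp x values and MB/s y values (the 'plt/' output directory must already exist, since the figure is saved there):

import time

now = time.time()
timestamps = [now + 60 * k for k in range(100)]   # one sample per minute
speeds = [50 + (k % 10) for k in range(100)]      # toy MB/s values
plotGraph([timestamps, speeds], diagramName='throughput', mvaLength=11)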
Code example #8
    def test_MovingDateAverageWith2DayEvenSpacingLength4(self):
        d = {1: 1, 3: 4, 5: 5}
        expected = {5: (1 + 4 + 4 + 5) / 4}
        result = MovingAverage().ArbitrarySpacing(d, 3)
        self.assertEqual(expected, result)
Code example #9
def main():
    dates = []
    temps = []

    with open(CSV_FILENAME, 'r') as csv_file:
        r = csv.reader(csv_file, delimiter=',', quotechar='"')

        first_row = True
        for row in r:
            # First row contains csv table header, so we discard it
            if first_row:
                first_row = False
                continue

            dates.append(datetime.strptime(row[1], DATETIME_FORMAT))
            temps.append(int(row[5]))

    averages = list(MovingAverage(temps, MOVING_AVERAGE_WINDOW_SIZE))
    """
    Gives us points where the 2 graphs intersect by
    checking where the sign of the difference changes
    """
    intersections = np.argwhere(np.diff(
        np.sign([temps[i] - averages[i] for i in range(len(temps))]))).flatten()

    i_dates = []
    i_temps = []
    for inter in intersections:
        """
        No need for a bounds check, since intersection can't happen unless
        there's a point following this one that causes the intersection
        to take place
        """
        vt0 = temps[inter]
        vt1 = temps[inter + 1]
        slope_temp = vt1 - vt0

        va0 = averages[inter]
        va1 = averages[inter + 1]
        slope_avg = va1 - va0

        # Distance from point0 to the intersection point
        dist = abs(vt0 - va0) / abs(slope_temp - slope_avg)

        # Calculate the actual time and temperature of intersection
        time_delta = dates[inter + 1] - dates[inter]

        i_date = dates[inter] + time_delta * dist
        i_temp = vt0 + slope_temp * dist

        print("Průnik nalezen {0:%d. %m. %Y v %H:%M} o teplotě {1}°C".format(
            i_date, round(i_temp, 1)))

        i_dates.append(i_date)
        i_temps.append(i_temp)

    plt.plot(dates, temps, 'b-', dates, averages, 'g-', linewidth=1)
    plt.plot(i_dates, i_temps, 'r+', mew=2, ms=8)

    plt.ylabel('Teplota (°C)')
    plt.xlabel('Čas')
    plt.title('Průměrná templota v Chodově, Praha')
    plt.grid()

    plt.show()
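The comment in main() explains that intersection points are found where the sign of the difference between the raw series and its moving average changes. A small standalone illustration of that trick with toy data (not from the CSV above):

import numpy as np

temps = np.array([11, 12, 9, 8, 11])
averages = np.array([10, 10, 10, 10, 10])

# the sign of the difference flips exactly where the two curves cross
crossings = np.argwhere(np.diff(np.sign(temps - averages))).flatten()
print(crossings)  # [1 3]: indices of the samples just before each crossing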
Code example #10
File: plotGraph.py Project: holgerBerger/ludalo
def plotJob(timestamps, rbs, rio, wbs, wio, title, verbose):

    # convert timestamps
    dates1 = [dt.datetime.fromtimestamp(ts) for ts in timestamps]

    # calculate filter size
    fsize = int(math.sqrt(len(dates1)))
    if fsize < 3:
        fsize = 3

    # calculate filtered values
    mvaRB = MovingAverage(fsize)
    mvaWB = MovingAverage(fsize)
    for i in range(len(timestamps)):
        mvaWB.addValue(timestamps[i], wbs[i])
        mvaRB.addValue(timestamps[i], rbs[i])

    filterd_WB = mvaWB.getAverage()
    filterd_RB = mvaRB.getAverage()

    WB_Values = []
    for item in filterd_WB:
        WB_Values.append(item[1])

    RB_Values = []
    for item in filterd_RB:
        RB_Values.append(item[1])

    # Write
    fig = plt.figure(figsize=(16, 10))
    ax1 = fig.add_subplot(2, 3, 1)
    plt.xticks(rotation=45)
    plt.xlabel('Time')
    plt.ylabel('Speed [MB/s]')

    ax2 = fig.add_subplot(2, 3, 2)
    plt.xlabel('IO Size [KB]')
    plt.ylabel('IOs')
    #plt.gca().yaxis.set_major_formatter(formatter)

    ax3 = fig.add_subplot(2, 3, 3)
    plt.ylabel('Speed [MB/s]')
    plt.xlabel('IO Size [KB]')

    # Read
    ax4 = fig.add_subplot(2, 3, 4)
    plt.xticks(rotation=45)
    plt.xlabel('Time')
    plt.ylabel('Speed [MB/s]')

    ax5 = fig.add_subplot(2, 3, 5)
    plt.xlabel('IO Size [KB]')
    plt.ylabel('IOs')
    #plt.gca().yaxis.set_major_formatter(formatter)

    ax6 = fig.add_subplot(2, 3, 6)
    plt.ylabel('Speed [MB/s]')
    plt.xlabel('IO Size [KB]')

    # Speed
    ax1.plot(dates1, wbs, label='Exact Data', lw=1, color='gray')
    ax1.plot(dates1, WB_Values, label='Filterd Data', lw=2, color='green')
    ax1.set_title('Write MB')
    ax1.legend(loc='best')

    ax4.plot(dates1, rbs, label='Exact Data', lw=1, color='gray')
    ax4.plot(dates1, RB_Values, label='Filterd Data', lw=2, color='blue')
    ax4.set_title('Read MB')
    ax4.legend(loc='best')

    # Histograms
    bins1 = 30
    # avoid arrays with only one element (important!)
    plot_wio = np.append(wio[wio > 0], 1)
    plot_wbs = np.append(wbs[wbs > 0], 1)

    plot_rio = np.append(rio[rio > 0], 1)
    plot_rbs = np.append(rbs[rbs > 0], 1)

    ax2.hist(plot_wio, bins=bins1, color='green')
    ax2.set_title('Histogram of Write IO Size')

    ax5.hist(plot_rio, bins=bins1, color='blue')
    ax5.set_title('Histogram of Read IO Size')

    # ------ scatter plots --------

    if len(plot_wio) > 1 and len(plot_wbs) > 1:
        ax3.hexbin(plot_wio, plot_wbs, bins='log', mincnt=1)
        # ax3.scatter(wio, wbs, color='green', s=1)
        ax3.set_title('Scatter Plots Write')

    if len(plot_rio) > 1 and len(plot_rbs) > 1:
        ax6.hexbin(plot_rio, plot_rbs, bins='log', mincnt=1)
        #ax6.scatter(rio[rio > 0], rbs[rbs > 0], color='blue', s=1)
        ax6.set_title('Scatter Plots Read')

    # show data plot
    plt.tight_layout()
    plt.savefig(str(title) + '.png', dpi=120)
    print('plot to ' + str(title) + '.png')
    #plt.show()
    plt.close('all')
Code example #11
df = df[start_time:end_time]
st.dataframe(df)

st.subheader('3.训练集划分')
number = st.number_input("请输入训练集所占比例:",min_value=0.5,max_value=0.9,value=0.8,step=0.1)
split = int(number * len(df))
st.write("选择的数据集大小:",len(df))
st.write("训练集大小:",split)
st.write("预测集大小:",len(df)-split)

st.subheader('4.选择预测目标')
type = st.selectbox('请选择预测目标:',('Close','Turnover'))
st.line_chart(df[type])

st.subheader('5.选择机器学习算法')
genre = st.selectbox("请选择时间序列预测算法",
     ('移动平均算法', '线性回归算法', '最近邻算法', 'AutoARIMA算法', 'LSTM算法'))
if genre == '移动平均算法':
    MovingAverage(df, type, split)
elif genre == '线性回归算法':
    LinearRegression(df, type, split)
elif genre == '最近邻算法':
    KNearestNeighbours(df, type, split)
elif genre == 'AutoARIMA算法':
    AutoARIMA(df, type, split)
elif genre == 'LSTM算法':
    LongShortTM(df, type, split)


Code example #12
    def __init__(self, StockCode, StartDate, EndDate):
        log_txt.insert(INSERT, 'getting price list\n')
        try:
            df = get_pse_data(StockCode, StartDate, EndDate)
        except Exception as e:
            log_txt.insert(INSERT, 'Get pse data error: {e}\n'.format(e=e))

        log_txt.insert(INSERT, 'price list obtained\n')

        # Derive the 30 day SMA of JFC's closing prices
        ma30 = df.close.rolling(30).mean()
        ma100 = df.close.rolling(100).mean()
        # Combine the closing prices with the 30 day SMA
        data = pd.concat([df.close, ma30, ma100], axis=1).dropna()
        data.columns = [
            'Closing Price', 'Simple Moving Average (30 day)',
            'Simple Moving Average (100 day)'
        ]

        #Create a new dataframe
        signal = pd.DataFrame(index=df['close'].index)
        signal['close'] = data['Closing Price']
        signal['SMA30'] = data['Simple Moving Average (30 day)']
        signal['SMA100'] = data['Simple Moving Average (100 day)']

        log_txt.insert(INSERT, 'calculate RSI\n')
        v_RSI = RSI.getRSI(self, signal)
        log_txt.insert(INSERT, 'done calculating RSI\n')
        log_txt.insert(INSERT, 'calculate moving averages\n')
        MA = MovingAverage.buy_sell(signal)
        log_txt.insert(INSERT, 'done calculating moving averages\n')

        try:
            signal['Buy_Signal_Price_MA'] = MA[0]
            signal['Sell_Signal_Price_MA'] = MA[1]
            signal['Buy_Signal_Price_RSI'] = v_RSI[0]
            signal['Sell_Signal_Price_RSI'] = v_RSI[1]
        except Exception as e:
            log_txt.insert(INSERT,
                           'Get buy and sell data error: {e}\n'.format(e=e))

        # Visually Show The Stock buy and sell signals
        # Create the title

        title = 'Adj. Close Price History Buy / Sell Signals   '
        # Get the stocks
        my_stocks = signal
        ticker = "close"

        # Create and plot the graph
        try:
            plt.figure(figsize=(12.2, 4.5))  # width = 12.2in, height = 4.5
            plt.scatter(my_stocks.index,
                        my_stocks['Buy_Signal_Price_MA'],
                        color='green',
                        label='Buy Signal MA',
                        marker='^',
                        alpha=1)
            plt.scatter(my_stocks.index,
                        my_stocks['Sell_Signal_Price_MA'],
                        color='darkred',
                        label='Sell Signal MA',
                        marker='v',
                        alpha=1)
            plt.scatter(my_stocks.index,
                        my_stocks['Buy_Signal_Price_RSI'],
                        color='blue',
                        label='Buy Signal RSI',
                        marker='^',
                        alpha=1)
            plt.scatter(my_stocks.index,
                        my_stocks['Sell_Signal_Price_RSI'],
                        color='red',
                        label='Sell Signal RSI',
                        marker='v',
                        alpha=1)
            plt.plot(
                my_stocks[ticker], label=ticker, alpha=0.35
            )  # plt.plot( X-Axis , Y-Axis, line_width, alpha_for_blending,  label)
            plt.plot(my_stocks['SMA30'], label='SMA30', alpha=0.35)
            plt.plot(my_stocks['SMA100'], label='SMA100', alpha=0.35)
            plt.title(title)
            plt.xlabel('Date', fontsize=18)
            plt.ylabel('Adj. Close Price PHP', fontsize=18)
            plt.legend(loc='upper left')
        except Exception as e:
            log_txt.insert(INSERT, 'Plotting data error: {e}\n'.format(e=e))
        try:
            plt.show()
        except Exception as e:
            log_txt.insert(INSERT, 'Plotting data error: {e}\n'.format(e=e))
        log_txt.insert(INSERT, 'Produced graph\n')
Code example #13
    def test_MovingDateAverageWith2DayEvenSpacing(self):
        d = {1: 1, 3: 4, 5: 5}
        expected = {3: 5 / 2, 5: 9 / 2}
        result = MovingAverage().ArbitrarySpacing(d, 2)
        self.assertEqual(expected, result)
Code example #14
        TakeProfit.__init__(self, data, tradeSignal)
        self.takeProfitRule = takeProfitRule
        self.CalcTakeProfitSeries()

    def CalcTakeProfitLevel(self, highWaterMark, tradeSignal):
        if tradeSignal == 1:
            level = highWaterMark * (
                1 - (self.takeProfitRule + 0.0) / 100
            )  # long signal: set the take profit BELOW the market
            tpLevel = [level, np.nan]
        else:
            level = highWaterMark * (
                1 + (self.takeProfitRule + 0.0) / 100
            )  # short signal: set the take profit ABOVE the market
            tpLevel = [np.nan, level]
        return tpLevel


if __name__ == "__main__":
    data = pd.DataFrame([
        1.0455, 1.0405, 1.0489, 1.0607, 1.0532, 1.0574, 1.0554, 1.0582, 1.0613,
        1.0643, 1.0601, 1.0713, 1.063, 1.0664, 1.0703, 1.0765, 1.0731, 1.0748,
        1.0682, 1.0699, 1.0695, 1.0798, 1.0769, 1.0759, 1.0783, 1.075, 1.0683,
        1.0698, 1.0655, 1.0643
    ])
    MAWindow = 5
    ma = MovingAverage(data, 'EMA', MAWindow)
    tradeSignal = ma.TradeSignal
    t = TakeProfitFixed(data, tradeSignal, 5)
    print(t.takeProfitContainer)
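A worked example of CalcTakeProfitLevel's arithmetic with illustrative numbers (not values from the code above):

highWaterMark, takeProfitRule = 1.07, 5
long_level = highWaterMark * (1 - takeProfitRule / 100)   # 1.0165, placed below the market
short_level = highWaterMark * (1 + takeProfitRule / 100)  # 1.1235, placed above the market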
Code example #15
    def test_MovingAverageHasLessElements(self):
        values = [1, 2, 3]
        ma = MovingAverage().EvenlySpaced(values, 2)
        self.assertEqual(len(values), len(ma) + 1)
Code example #16
File: model.py Project: ychnlgy/ToyKinematicsDataset
class EvolutionaryUnit(Model):
    def __init__(self, D):
        super().__init__(D)
        self.target_modules = [torch.nn.Linear]
        self.gain_rate = 0.01
        self.loss_rate = 0.05

        self.epochs = 100
        self.batch = 8
        self.score = MovingAverage(momentum=0.90)
        self.train_score = MovingAverage(momentum=0.9)

    def set_device(self, device):
        self.device = device
        self.to(device)

    def __lt__(self, other):
        return self.get_score() < other.get_score()

    def fit(self, X, Y, X_test, Y_test):

        dataset = torch.utils.data.TensorDataset(X, Y)
        dataloader = torch.utils.data.DataLoader(dataset,
                                                 batch_size=self.batch,
                                                 shuffle=True)

        lossf = torch.nn.MSELoss()
        optim = torch.optim.Adam(self.parameters())
        sched = torch.optim.lr_scheduler.MultiStepLR(optim, milestones=[60])

        for epoch in range(self.epochs):

            self.train()
            for x, y in dataloader:
                x = x.to(self.device)
                y = y.to(self.device)
                yh = self(x)
                loss = lossf(yh, y)
                optim.zero_grad()
                loss.backward()
                optim.step()

                self.train_score.update(self.calc_score(yh, y))

            sched.step()

        with torch.no_grad():
            self.eval()
            yh_test = self(X_test.to(self.device))
            self.score.update(self.calc_score(yh_test, Y_test.to(self.device)))

    def get_score(self):
        return self.score.peek()  # lower is better

    def get_train_score(self):
        return self.train_score.peek()

    def mutate(self):
        out = copy.deepcopy(self)
        out.apply(self.recurse_apply(self.gain_ability))
        out.apply(self.recurse_apply(self.lose_ability))
        return out

    def share_abilities(self, other):
        n1 = copy.deepcopy(self)
        n2 = copy.deepcopy(other)
        for m1, m2 in zip(n1.net, n2.net):
            self.exchange(m1, m2)
        return n1, n2

    # === PRIVATE ===

    def exchange(self, m1, m2):
        if type(m1) in self.target_modules:
            p = 0.5
            i = torch.rand_like(m1.weight.data) < p
            m1.weight.data[i] = m2.weight.data[i]
            m2.weight.data[~i] = m1.weight.data[~i]

    def calc_score(self, yh, y):
        return (yh - y).abs().sum().item()

    def recurse_apply(self, f):
        return lambda m: f(m) if type(m) in self.target_modules else None

    def gain_ability(self, m):
        i, v = self.get_new_weights(m, self.gain_rate)
        torch.nn.init.kaiming_uniform_(v, a=math.sqrt(5))
        m.weight.data[i] = v[i]

    def lose_ability(self, m):
        i, v = self.get_new_weights(m, self.loss_rate)
        m.weight.data[i] = v[i]

    def get_new_weights(self, m, rate):
        i = (torch.rand_like(m.weight.data) < rate)
        v = torch.zeros_like(m.weight.data)
        return i, v
Code example #17
#fname = '.\camera_cal\calibration3.jpg'
#img = cv2.imread(fname)
#
#top_down, perspective_M = cc.corners_unwarp(img, cc.nx, cc.ny, mtx, dist)
#
#f, (ax1, ax2) = plt.subplots(1,2, figsize=(12, 7))
#f.tight_layout()
#ax1.imshow(img)
#ax1.set_title('Original Image')
#ax2.imshow(top_down)
#ax2.set_title('Undistorted and Warped Image')
##plt.subplots_adjust(left=0., right=1, top=0.9, bottom=0.)

#%%
global counter, left_rad_filt, right_rad_filt
left_rad_filt = MovingAverage(8)
right_rad_filt = MovingAverage(8)
left_fit_filt = MovingAverage(6)
right_fit_filt = MovingAverage(6)

counter = 0


def pipeline_image(image, force_first_frame=False, plot_figure=False):

    global counter
    if force_first_frame:
        counter = 1
    else:
        counter += 1