Merge pull request #4 from dev-abuke/task-backend
Task backend
dev-abuke authored Jun 21, 2024
2 parents 41ae2a6 + d2ee471 commit 26236d1
Showing 6 changed files with 466 additions and 1 deletion.
3 changes: 2 additions & 1 deletion .gitignore
@@ -127,7 +127,8 @@ celerybeat.pid
 env/
-backtesting/**
 venv/
 backtest/
+backtesting/**
+backtest/**
 ENV/
 env.bak/
 venv.bak/
85 changes: 85 additions & 0 deletions backend/main.py
@@ -0,0 +1,85 @@
from typing import List
from fastapi import FastAPI, Depends, HTTPException
from sqlalchemy.orm import Session
from . import models, schemas, database
import pandas as pd
import json

from .utils.backtest import run_backtest  # relative import so it resolves inside the backend package, like the imports above

models.Base.metadata.create_all(bind=database.engine)

app = FastAPI()

def get_db():
    # Yield a database session per request and always close it afterwards
    db = database.SessionLocal()
    try:
        yield db
    finally:
        db.close()

@app.get('/health')
def check_health():
    return "healthy"

@app.post('/indicators/', response_model=schemas.Indicator)
def create_indicator(indicator: schemas.IndicatorCreate, db: Session = Depends(get_db)):
    db_indicator = models.Indicator(**indicator.dict())
    db.add(db_indicator)
    db.commit()
    db.refresh(db_indicator)
    return db_indicator

@app.get('/indicators/', response_model=List[schemas.Indicator])
def read_indicators(skip: int = 0, limit: int = 10, db: Session = Depends(get_db)):
    indicators = db.query(models.Indicator).offset(skip).limit(limit).all()
    return indicators

@app.post('/scenes/', response_model=schemas.Scene)
def create_scene(scene: schemas.SceneCreate, db: Session = Depends(get_db)):
    db_scene = models.Scene(**scene.dict())
    db.add(db_scene)
    db.commit()
    db.refresh(db_scene)
    return db_scene

@app.get('/scenes/', response_model=List[schemas.Scene])
def read_scenes(skip: int = 0, limit: int = 10, db: Session = Depends(get_db)):
    scenes = db.query(models.Scene).offset(skip).limit(limit).all()
    return scenes

@app.post('/backtests/{scene_id}', response_model=List[schemas.BacktestResult])
def perform_backtest(scene_id: int, db: Session = Depends(get_db)):
    db_scene = db.query(models.Scene).filter(models.Scene.id == scene_id).first()
    if db_scene is None:
        raise HTTPException(status_code=404, detail="Scene not found")

    # Fetch data based on the scene's date range
    df = fetch_data(db_scene.start_date, db_scene.end_date)

    # Perform backtest
    metrics = run_backtest({
        'period': db_scene.period,
        'indicator_name': db_scene.indicator.name
    }, df)

    # Save metrics to database
    backtest_results = []
    for metric in metrics:
        db_backtest_result = models.BacktestResult(scene_id=scene_id, **metric)
        db.add(db_backtest_result)
        db.commit()
        db.refresh(db_backtest_result)
        backtest_results.append(db_backtest_result)

    return backtest_results

@app.get('/backtest_results/', response_model=List[schemas.BacktestResult])
def read_backtest_results(skip: int = 0, limit: int = 10, db: Session = Depends(get_db)):
    backtest_results = db.query(models.BacktestResult).offset(skip).limit(limit).all()
    return backtest_results

def fetch_data(start_date, end_date):
    # Replace this with actual data fetching logic
    df = pd.read_csv('btc_usdt_candlestick.csv', index_col='timestamp', parse_dates=True)
    return df.loc[start_date:end_date]
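
Taken together, the endpoints above form a small workflow: register an indicator, create a scene that references it, then trigger a backtest for that scene. A minimal client sketch, assuming the app is served locally on port 8000 (e.g. via uvicorn) and that the requests package is installed; the payload values are illustrative only:

import requests

BASE = "http://localhost:8000"  # assumed local dev server

# 1. Register an indicator (illustrative values)
indicator = requests.post(f"{BASE}/indicators/", json={
    "name": "SMA",
    "description": "Simple moving average strategy",
}).json()

# 2. Create a scene that references the indicator and a date range
scene = requests.post(f"{BASE}/scenes/", json={
    "period": 15,
    "start_date": "2023-01-01",
    "end_date": "2023-06-30",
    "indicator_id": indicator["id"],
}).json()

# 3. Run the backtest for that scene and print the stored metrics
results = requests.post(f"{BASE}/backtests/{scene['id']}").json()
for r in results:
    print(r["net_profit"], r["number_of_trades"])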
53 changes: 53 additions & 0 deletions backend/schemas.py
@@ -0,0 +1,53 @@
from pydantic import BaseModel
from typing import List, Optional
from datetime import date, datetime

class IndicatorBase(BaseModel):
    name: str
    description: Optional[str] = None

class IndicatorCreate(IndicatorBase):
    pass

class Indicator(IndicatorBase):
    id: int

    class Config:
        orm_mode = True

class SceneBase(BaseModel):
    period: int
    start_date: date
    end_date: date
    indicator_id: int

class SceneCreate(SceneBase):
    pass

class Scene(SceneBase):
    id: int
    backtests: List['BacktestResult'] = []
    indicator: Indicator

    class Config:
        orm_mode = True

class BacktestResultBase(BaseModel):
    scene_id: int
    gross_profit: float
    net_profit: float
    number_of_trades: int
    winning_trades: Optional[int] = None
    losing_trades: Optional[int] = None
    max_drawdown: Optional[float] = None
    sharpe_ratio: Optional[float] = None

class BacktestResultCreate(BacktestResultBase):
    pass

class BacktestResult(BacktestResultBase):
    id: int
    created_at: datetime

    class Config:
        orm_mode = True

# Resolve the 'BacktestResult' forward reference used in Scene.backtests,
# which pydantic v1 otherwise leaves unevaluated.
Scene.update_forward_refs()
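
models.py is not included in the rendered portion of this diff, but main.py and the schemas above constrain its shape. The following is only a plausible sketch of the SQLAlchemy models these schemas mirror, assuming a declarative Base in database.py; column names follow the schema fields, while table names, relationship settings, and the created_at default are assumptions:

from sqlalchemy import Column, Integer, String, Float, Date, DateTime, ForeignKey, func
from sqlalchemy.orm import relationship
from .database import Base  # assumed declarative base

class Indicator(Base):
    __tablename__ = 'indicators'
    id = Column(Integer, primary_key=True, index=True)
    name = Column(String, nullable=False)
    description = Column(String, nullable=True)

class Scene(Base):
    __tablename__ = 'scenes'
    id = Column(Integer, primary_key=True, index=True)
    period = Column(Integer, nullable=False)
    start_date = Column(Date, nullable=False)
    end_date = Column(Date, nullable=False)
    indicator_id = Column(Integer, ForeignKey('indicators.id'))
    indicator = relationship('Indicator')
    backtests = relationship('BacktestResult', back_populates='scene')

class BacktestResult(Base):
    __tablename__ = 'backtest_results'
    id = Column(Integer, primary_key=True, index=True)
    scene_id = Column(Integer, ForeignKey('scenes.id'))
    gross_profit = Column(Float)
    net_profit = Column(Float)
    number_of_trades = Column(Integer)
    winning_trades = Column(Integer, nullable=True)
    losing_trades = Column(Integer, nullable=True)
    max_drawdown = Column(Float, nullable=True)
    sharpe_ratio = Column(Float, nullable=True)
    created_at = Column(DateTime, server_default=func.now())
    scene = relationship('Scene', back_populates='backtests')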
68 changes: 68 additions & 0 deletions backend/utils/backtest.py
@@ -0,0 +1,68 @@
import backtrader as bt
import pandas as pd

class SMAStrategy(bt.Strategy):
    params = (
        ('period', 15),
    )

    def __init__(self):
        self.sma = bt.indicators.SMA(self.data.close, period=self.params.period)
        self.order = None
        self.buyprice = None
        self.buycomm = None
        # Track closed-trade PnLs ourselves; backtrader's internal _trades
        # structure is a nested dict and cannot be filtered directly.
        self.closed_pnls = []

    def next(self):
        if self.order:
            return

        # Buy when the close is below the SMA, sell when it is above.
        if self.sma > self.data.close:
            self.order = self.buy()
        elif self.sma < self.data.close:
            self.order = self.sell()

    def notify_order(self, order):
        if order.status in [order.Submitted, order.Accepted]:
            return

        if order.status in [order.Completed]:
            if order.isbuy():
                self.buyprice = order.executed.price
                self.buycomm = order.executed.comm
            else:
                self.sellprice = order.executed.price
                self.sellcomm = order.executed.comm

            self.bar_executed = len(self)

        self.order = None

    def notify_trade(self, trade):
        if not trade.isclosed:
            return

        print(f'Operation Profit, Gross {trade.pnl}, Net {trade.pnlcomm}')
        self.closed_pnls.append(trade.pnl)
        # Keys match the BacktestResult columns so main.py can unpack the
        # dict straight into the model. The drawdown comes from the analyzer
        # attached in run_backtest; the Sharpe ratio is only known once the
        # run has finished and is back-filled there.
        self.cerebro.metrics.append({
            'gross_profit': trade.pnl,
            'net_profit': trade.pnlcomm,
            'number_of_trades': len(self.closed_pnls),
            'winning_trades': len([p for p in self.closed_pnls if p > 0]),
            'losing_trades': len([p for p in self.closed_pnls if p <= 0]),
            'max_drawdown': self.analyzers.drawdown.get_analysis().max.drawdown,
            'sharpe_ratio': None,
        })

def run_backtest(scene, df):
    cerebro = bt.Cerebro()
    data = bt.feeds.PandasData(dataname=df)
    cerebro.adddata(data)

    if scene['indicator_name'] == 'SMA':
        cerebro.addstrategy(SMAStrategy, period=scene['period'])
    # Add more strategies as needed

    # The broker exposes neither maxdrawdown nor get_sharperatio; use
    # analyzers for those figures instead.
    cerebro.addanalyzer(bt.analyzers.DrawDown, _name='drawdown')
    cerebro.addanalyzer(bt.analyzers.SharpeRatio, _name='sharpe')

    cerebro.metrics = []
    results = cerebro.run()

    # Back-fill the Sharpe ratio now that the full run is available.
    if results and cerebro.metrics:
        sharpe = results[0].analyzers.sharpe.get_analysis().get('sharperatio')
        for metric in cerebro.metrics:
            metric['sharpe_ratio'] = sharpe

    return cerebro.metrics
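
run_backtest can also be exercised outside the API, which is handy for checking a strategy before wiring it into a scene. A minimal sketch, assuming the same btc_usdt_candlestick.csv file referenced in main.py is available locally and that the repository root is on PYTHONPATH so backend is importable; the date range and period are illustrative:

import pandas as pd
from backend.utils.backtest import run_backtest  # assumes backend/ is an importable package

# Load the candlestick data the API would normally fetch for a scene
df = pd.read_csv('btc_usdt_candlestick.csv', index_col='timestamp', parse_dates=True)

# Run the SMA strategy over an illustrative slice of the data
metrics = run_backtest(
    {'period': 15, 'indicator_name': 'SMA'},
    df.loc['2023-01-01':'2023-06-30'],
)

for m in metrics:
    print(m['net_profit'], m['max_drawdown'], m['sharpe_ratio'])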