Skip to content

Commit

Permalink
Merge pull request #120 from F33RNI/next
Browse files Browse the repository at this point in the history
Next
  • Loading branch information
F33RNI authored Dec 25, 2023
2 parents c375521 + 73390f1 commit 7266d2c
Show file tree
Hide file tree
Showing 4 changed files with 117 additions and 41 deletions.
68 changes: 68 additions & 0 deletions .github/workflows/release.yml
Original file line number Diff line number Diff line change
@@ -0,0 +1,68 @@
# Build release binaries on GitHub release creation/edit:
#  - build-linux: cross-builds linux/amd64 and linux/arm64 binaries inside Docker
#    (QEMU + Buildx) and attaches them to the release
#  - build: builds native binaries with PyInstaller on Windows / Ubuntu / macOS
name: Build binary files for release

on:
  release:
    types:
      - 'created'
      - 'edited'

jobs:
  build-linux:
    strategy:
      fail-fast: false
      matrix:
        platform:
          - os: linux
            arch: amd64
          - os: linux
            arch: arm64

    runs-on: 'ubuntu-latest'

    steps:
      - name: Check out code
        uses: actions/checkout@v4
      - name: Set up QEMU
        uses: docker/setup-qemu-action@v3
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Build Docker Image
        id: build
        uses: docker/build-push-action@v5
        with:
          context: .
          platforms: ${{ matrix.platform.os }}/${{ matrix.platform.arch }}
          # Export the image filesystem to ./docker-output instead of pushing
          outputs: docker-output
          cache-from: type=gha
          cache-to: type=gha,mode=max
      - run: mkdir dist
      # NOTE: fixed inconsistent expression spacing (was "matrix.platform.os}}")
      - run: cp docker-output/app/telegramus dist/telegramus-${{ matrix.platform.os }}-${{ matrix.platform.arch }}
      - name: Release
        uses: softprops/action-gh-release@v1
        with:
          files: |
            dist/*

  build:
    strategy:
      fail-fast: false
      matrix:
        os:
          - 'windows-latest'
          - 'ubuntu-latest'
          - 'macos-latest'

    runs-on: ${{ matrix.os }}

    steps:
      - uses: actions/checkout@v4
      - uses: actions/setup-python@v5
        with:
          python-version: '3.10'
      - run: pip install -r requirements.txt pyinstaller
      # tiktoken_ext.openai_public is loaded dynamically, so PyInstaller needs the hint
      - run: pyinstaller --hidden-import tiktoken_ext.openai_public --onefile --name telegramus-${{ matrix.os }} main.py
      - name: Release
        uses: softprops/action-gh-release@v1
        with:
          files: |
            dist/*
41 changes: 29 additions & 12 deletions BotHandler.py
Original file line number Diff line number Diff line change
Expand Up @@ -132,6 +132,7 @@ async def send_message_async(
messages: List[Dict],
request_response: RequestResponseContainer.RequestResponseContainer,
end=False,
plain_text=False,
):
"""
Prepare message and send
Expand All @@ -154,7 +155,9 @@ async def send_message_async(
if response_len == 0 and len(request_response.response_images) == 0:
request_response.response = messages["empty_message"]

await _send_prepared_message_async(config, messages, request_response, end)
await _send_prepared_message_async(
config, messages, request_response, end, plain_text
)

# Error?
except Exception as e:
Expand All @@ -169,6 +172,7 @@ async def _send_prepared_message_async(
messages: Dict,
request_response: RequestResponseContainer.RequestResponseContainer,
end=False,
plain_text=False,
):
"""
Sends new message or edits current one
Expand All @@ -181,7 +185,7 @@ async def _send_prepared_message_async(
if not should_send_message(config, request_response, end):
return

markup = build_markup(messages, request_response, end)
markup = build_markup(messages, request_response, end, plain_text)
if markup is not None:
request_response.reply_markup = markup

Expand Down Expand Up @@ -226,6 +230,7 @@ def build_markup(
messages: Dict,
request_response: RequestResponseContainer.RequestResponseContainer,
end=False,
plain_text=False,
) -> InlineKeyboardMarkup:
"""
Build markup for the response
Expand All @@ -234,6 +239,9 @@ def build_markup(
:param end:
:return: InlineKeyboardMarkup
"""
if plain_text:
return None

if not end:
# Generate stop button if it's the first message
if request_response.message_id is None or request_response.message_id < 0:
Expand Down Expand Up @@ -310,15 +318,19 @@ async def parse_img(img_source: str):
:return:
"""
try:
res = requests.head(
img_source,
timeout=10,
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.4472.114 Safari/537.36"
},
allow_redirects=True,
loop = asyncio.get_event_loop()
res = await loop.run_in_executor(
None,
lambda: requests.head(
img_source,
timeout=10,
headers={
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) "
"AppleWebKit/537.36 (KHTML, like Gecko) "
"Chrome/91.4472.114 Safari/537.36"
},
allow_redirects=True,
),
)
content_type = res.headers.get("content-type")
if not content_type.startswith("image"):
Expand Down Expand Up @@ -1904,8 +1916,13 @@ async def bot_command_queue(
message += message_
container_counter += 1

request_response = RequestResponseContainer.RequestResponseContainer(
user,
response=message,
reply_message_id=update.effective_message.id,
)
# Send queue content
await _send_safe(user["user_id"], message, context)
await send_message_async(self.config, self.messages, request_response, end=True, plain_text=True)

async def bot_command_chatid(
self, update: Update, context: ContextTypes.DEFAULT_TYPE
Expand Down
41 changes: 16 additions & 25 deletions Dockerfile
Original file line number Diff line number Diff line change
Expand Up @@ -4,43 +4,34 @@
# Use buildkit syntax labs
# https://github.com/moby/buildkit

# First stage: install dependencies
FROM python:3.10-slim AS build
RUN apt-get update
RUN apt-get install -y git build-essential
RUN --mount=type=cache,target=/root/.cache/pip \
apt-get update && \
apt-get install -y git binutils build-essential && \
pip install pyinstaller

WORKDIR /app
# Build and save wheels
# Install dependencies
RUN --mount=type=cache,target=/root/.cache/pip \
--mount=type=bind,source=requirements.txt,target=requirements.txt \
pip wheel --wheel-dir=/wheels -r requirements.txt

# Second stage: compile our application
FROM python:3.10-slim AS compile
RUN mkdir -p /lib
RUN mkdir -p /lib64
RUN apt-get update
RUN apt-get install -y binutils
RUN --mount=type=cache,target=/root/.cache/pip pip install pyinstaller

# Install built dependencies
RUN --mount=type=bind,from=build,source=/wheels,target=/wheels pip install --no-index /wheels/*
pip install -r requirements.txt

WORKDIR /src
RUN --mount=type=bind,source=. \
pyinstaller --specpath /app --distpath /app/dist --workpath /app/work \
--collect-all tls_client --collect-all tiktoken_ext.openai_public \
--onefile --name main main.py
--hidden-import tiktoken_ext.openai_public \
--onefile --name telegramus main.py

# Build application image
FROM alpine
ENV TELEGRAMUS_CONFIG_FILE "config.json"
ENV TELEGRAMUS_CONFIG_FILE "/app/config.json"
ENV PATH /app:$PATH

COPY --link --from=compile /lib /lib
COPY --link --from=compile /lib64 /lib64
COPY --link --from=compile /app/dist/main /app/telegramus
COPY --link --from=python:3.10-slim /li[b] /lib
COPY --link --from=python:3.10-slim /lib6[4] /lib64
COPY --link --from=build /app/dist/telegramus /app/telegramus

WORKDIR /app
ADD config.json messages.json /app/
COPY config.json messages.json /app/

# Run main script
CMD ["/app/telegramus"]
CMD ["telegramus"]
8 changes: 4 additions & 4 deletions main.py
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@
from JSONReaderWriter import load_json

# GPT-Telegramus version
__version__ = "4.0.1"
__version__ = "4.1.0"

# Logging level
LOGGING_LEVEL = logging.INFO
Expand All @@ -61,12 +61,12 @@ def main():
Main entry
:return:
"""
# Parse arguments
args = parse_args()

# Multiprocessing fix for Windows
if sys.platform.startswith("win"):
multiprocessing.freeze_support()

# Parse arguments
args = parse_args()

# Initialize logging and start listener as process
logging_handler = LoggingHandler.LoggingHandler()
Expand Down

0 comments on commit 7266d2c

Please sign in to comment.