Commit be856e41 authored by Raul Sirel's avatar Raul Sirel
Browse files

mlp worker in docker

parent ff6f1689
Pipeline #4454 canceled with stage
......@@ -3,6 +3,7 @@ image: debian:buster
stages:
- test
- build
- build-worker
Test:
before_script:
......@@ -26,3 +27,14 @@ Build:
- twine upload dist/*
only:
- tags
# Builds and pushes the MLP worker Docker image on tagged commits.
BuildWorker:
  stage: build-worker
  tags:
    - docker
  script:
    # registry credentials come from protected CI variables
    - docker login -u $CI_DEPLOY_USER -p $CI_DEPLOY_PASSWORD docker.texta.ee
    - sh ./worker/build_and_push.sh
    # reclaim disk space on the shared runner after the build
    - docker system prune --volumes -f
  only:
    - tags
#!/bin/bash
# Build the CPU MLP worker image and push it to the docker.texta.ee registry.
# Invoked from CI (BuildWorker job) as `sh ./worker/build_and_push.sh` from the
# repository root, so only POSIX sh features are used below.
#
# Fail fast: without `set -e` a failed `docker build` would still fall through
# to `docker push` (publishing a stale :latest) and the CI job would succeed.
set -e

# retrieve version from file
version_file="./VERSION"
# kept for the (currently disabled) version-tagging commands below
version=$(cat "$version_file")
# build latest image
docker build --compress --force-rm --no-cache -t docker.texta.ee/texta/texta-mlp-python/mlp-worker:latest -f ./worker/cpu.Dockerfile ./worker
# build latest GPU image
#docker build --compress --force-rm --no-cache -t docker.texta.ee/texta/texta-mlp-python/mlp-worker:latest-gpu -f ./worker/gpu.Dockerfile ./worker
# tag version
#docker tag docker.texta.ee/texta/texta-mlp-python/mlp-worker:latest docker.texta.ee/texta/texta-mlp-python/mlp-worker:$version
#docker tag docker.texta.ee/texta/texta-mlp-python/mlp-worker:latest-gpu docker.texta.ee/texta/texta-mlp-python/mlp-worker:$version-gpu
# push version tag
#docker push docker.texta.ee/texta/texta-mlp-python/mlp-worker:$version
#docker push docker.texta.ee/texta/texta-mlp-python/mlp-worker:$version-gpu
# push latest tag
docker push docker.texta.ee/texta/texta-mlp-python/mlp-worker:latest
#docker push docker.texta.ee/texta/texta-mlp-python/mlp-worker:latest-gpu
......@@ -5,7 +5,7 @@ RUN mkdir /var/texta-mlp
WORKDIR /var/texta-mlp
# install requirements
COPY ./environment.yaml ./environment.yaml
COPY ./environment-cpu.yaml ./environment.yaml
RUN conda env create -f environment.yaml \
# conda clean up
&& conda clean -afy \
......@@ -16,6 +16,7 @@ RUN conda env create -f environment.yaml \
# copy files
COPY ./supervisord.conf /opt/conda/envs/texta-mlp/etc/supervisord/conf.d/supervisord.conf
COPY ./entrypoint.sh ./entrypoint.sh
COPY ./settings.py ./settings.py
COPY ./taskman.py ./taskman.py
# create dir for data
......
......@@ -13,17 +13,9 @@ services:
container_name: texta-mlp-worker
build:
context: .
dockerfile: ./Dockerfile
dockerfile: ./cpu.Dockerfile
volumes:
- mlp-data:/var/texta-mlp/data
texta-mlp-tester:
container_name: texta-mlp-tester
build:
context: .
dockerfile: ./tester.Dockerfile
depends_on:
- texta-mlp-worker
volumes:
mlp-data:
name: texta-mlp
# Conda environment for the CPU-only MLP worker image (environment-cpu.yaml).
# Consumed by `conda env create -f environment.yaml` in cpu.Dockerfile.
channels:
- conda-forge
dependencies:
- python=3.7
- pip
# supervisor runs the celery worker inside the container (see supervisord.conf)
- supervisor
- lxml
# pip-installed packages — NOTE(review): extraction appears to have stripped
# indentation; in the real file the entries below nest one level under `pip:`.
- pip:
- texta-mlp
- celery==5.*
- redis==3.*
# CPU-only torch build, resolved via the extra find-links index on the next line
- torch==1.5.1+cpu
- -f https://download.pytorch.org/whl/torch_stable.html
......@@ -2,9 +2,9 @@ import os
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))

# env variables
# Comma-separated list of language codes the worker should handle, e.g. "et,en".
MLP_WORKER_LANGUAGE_CODES = os.getenv("MLP_WORKER_LANGUAGE_CODES", "et").split(",")
# Fix: this previously read the misspelled plural env var
# MLP_WORKER_DEFAULT_LANGUAGE_CODES, so setting the documented singular name had
# no effect. Read the singular name first and fall back to the old plural one so
# existing deployments keep working.
MLP_WORKER_DEFAULT_LANGUAGE_CODE = os.getenv(
    "MLP_WORKER_DEFAULT_LANGUAGE_CODE",
    os.getenv("MLP_WORKER_DEFAULT_LANGUAGE_CODES", "et"),
)
# Directory where MLP models/resources are stored inside the container.
MLP_WORKER_RESOURCE_DIR = os.getenv("MLP_WORKER_RESOURCE_DIR", "/var/data")
# Celery broker / result backend — default to the compose-network redis service.
MLP_WORKER_BROKER = os.getenv("MLP_WORKER_BROKER", "redis://mlp-redis:6379/0")
MLP_WORKER_RESULT_BACKEND = os.getenv("MLP_WORKER_RESULT_BACKEND", "redis://mlp-redis:6379/0")
from celery import shared_task
from celery import Celery
from typing import Optional
import logging
from texta_mlp.mlp import MLP
from worker.settings import (
from settings import (
MLP_WORKER_LANGUAGE_CODES,
MLP_WORKER_DEFAULT_LANGUAGE_CODE,
MLP_WORKER_RESOURCE_DIR
MLP_WORKER_RESOURCE_DIR,
MLP_WORKER_BROKER,
MLP_WORKER_RESULT_BACKEND
)
# Create Celery app with proper conf
app = Celery("worker")
app.conf.broker_url = MLP_WORKER_BROKER
app.conf.result_backend = MLP_WORKER_RESULT_BACKEND
ml_processor: Optional[MLP] = None
# start logging
logging.basicConfig(format='%(levelname)s %(asctime)s: %(message)s',
datefmt='%d.%m.%Y %H:%M:%S',
level=logging.INFO)
# Global MLP object for the worker so it won't get reloaded on each task
ml_processor: Optional[MLP] = None
def load_mlp():
global ml_processor
......
from celery import Celery
from worker import taskman
app = Celery("worker")
app.conf.broker_url = "redis://localhost:6379/0"
app.conf.result_backend = "redis://localhost:6379/0"
'''
@app.task
def mlp_task():
taskman.mlp()
'''
print("init")
# Image for the MLP worker tester container (service texta-mlp-tester in
# docker-compose). Builds a conda env and runs test_mlp_worker.py on start.
FROM continuumio/miniconda3:latest
# create dir for MLP tester
RUN mkdir /var/texta-mlp-tester
WORKDIR /var/texta-mlp-tester
# install requirements
COPY ./environment.yaml ./environment.yaml
# single RUN layer: create env, then strip caches/static libs/bytecode to
# keep the image small
RUN conda env create -f environment.yaml \
# conda clean up
    && conda clean -afy \
    && find /opt/conda/ -follow -type f -name '*.a' -delete \
    && find /opt/conda/ -follow -type f -name '*.pyc' -delete \
    && find /opt/conda/ -follow -type f -name '*.js.map' -delete
# copy files
COPY ./test_mlp_worker.py ./test_mlp_worker.py
COPY ./taskman.py ./taskman.py
# NOTE(review): 6379 is the redis port — exposing it from the tester container
# looks unintended (the tester connects to redis, it doesn't serve it); confirm.
EXPOSE 6379
RUN chmod +x /var/texta-mlp-tester/test_mlp_worker.py
# run the tester inside the conda env; env name "texta-mlp" must match
# the `name:` field of environment.yaml
ENTRYPOINT ["conda", "run", "-n", "texta-mlp", "python3", "test_mlp_worker.py"]
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment