Deploying with Docker Compose

Requirements

  1. Install Docker and Docker Compose (see the version checks after this list).
  2. Ensure your machine has at least 2 CPU cores and 4GB of RAM.
  3. Review the following docker-compose.yml and configuration files to ensure they meet your requirements.
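
To confirm both tools are installed, these version checks should succeed before you continue:

Command Line
docker --version
docker compose version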

Note: This deployment method is designed for quick and lightweight setups. Optional services like Elasticsearch and Redis are not included in the Docker Compose file to keep the deployment lighter. For production environments, it is recommended to deploy RepoFlow using the Helm chart on a Kubernetes cluster. Refer to the Helm deployment guide for more information.

Installation

You can download a ready-to-use bundle, extract it, and jump to step 5, or continue with the step-by-step guide below.

1. Create the Docker Compose File

Create a file named docker-compose.yml in your project directory with the following content:

Docker Compose Configuration
name: repoflow

services:
  nginx:
    image: nginxinc/nginx-unprivileged:1.29.3-alpine
    restart: unless-stopped
    ports:
      - "9080:8080"
    volumes:
      - ./nginx/default.conf:/etc/nginx/conf.d/default.conf
    depends_on:
      - server
      - client
      - hasura
    deploy:
      replicas: 1
    networks:
      - repoflow-net

  client:
    image: api.repoflow.io/repoflow-public/docker-public/library/repoflow-client:0.8.0
    restart: unless-stopped
    volumes:
      - ./client/env.js:/usr/share/nginx/html/env.js:ro
      - ./client/analytics.js:/usr/share/nginx/html/analytics.js:ro
    deploy:
      replicas: 1
    networks:
      - repoflow-net

  server:
    image: api.repoflow.io/repoflow-public/docker-public/library/repoflow-server:0.8.0
    restart: unless-stopped
    env_file:
      - ./secrets.env
    environment:
      - IS_PRINT_ENV=true
      - SERVER_PORT=3000
      - SERVER_URL=http://localhost:9080/api
      - FRONTEND_URL=http://localhost:9080
      - TMP_FOLDER=/tmp
      - S3_USE_SSL=false
      - S3_PORT=9000
      - S3_END_POINT=minio
      - S3_BUCKET=repoflow
      - S3_USE_PRE_SIGNED_URL=false
      - INSTALLATION_METHOD=docker-compose
      - HASURA_URL=http://hasura:8080/v1/graphql
      - HASURA_URL_REST=http://hasura:8080/api/rest
      - IS_SMART_SEARCH_ENABLED=false
      - DEFAULT_SEARCH_LIMIT=10
      - IS_REDIS_ENABLED=false
      - IS_REMOTE_CACHE_ENABLED=true
      - COOKIE_EXPIRY_IN_SECONDS=2592000
      - JWS_ALGORITHM=HS256
      - DEFAULT_ADMIN_USER_NAME=admin
      - DEFAULT_ADMIN_PASSWORD=password
    volumes:
      - server-logs:/var/log/repoflow
      - grype-db:/srv/vulnerabilitiesScanning
    depends_on:
      - hasura
    deploy:
      replicas: 1
    networks:
      - repoflow-net

  postgresql:
    image: postgres:14
    restart: unless-stopped
    environment:
      POSTGRES_USER: user
      POSTGRES_PASSWORD: password
      POSTGRES_DB: repoflow
    volumes:
      - postgresql-data:/var/lib/postgresql/data
    healthcheck:
      test: ["CMD-SHELL", "pg_isready -U user -d repoflow -h 127.0.0.1"]
      interval: 5s
      timeout: 5s
      retries: 20
      start_period: 10s
    deploy:
      replicas: 1
    networks:
      - repoflow-net

  minio:
    image: minio/minio:RELEASE.2025-07-23T15-54-02Z
    restart: unless-stopped
    env_file:
      - ./secrets.env
    volumes:
      - minio-data:/data
    command: server /data --console-address ":9001"
    deploy:
      replicas: 1
    networks:
      - repoflow-net

  hasura:
    image: hasura/graphql-engine:v2.48.1
    restart: unless-stopped
    env_file:
      - ./secrets.env
    environment:
      - HASURA_GRAPHQL_ENABLE_CONSOLE=true
      - HASURA_GRAPHQL_DEV_MODE=true
      - HASURA_GRAPHQL_ENABLED_LOG_TYPES=startup,http-log,webhook-log,websocket-log,query-log
      - HASURA_GRAPHQL_ENABLE_TELEMETRY=false
      - HASURA_GRAPHQL_UNAUTHORIZED_ROLE=anonymous
      - HASURA_GRAPHQL_METADATA_DATABASE_URL=postgresql://user:password@postgresql:5432/repoflow
      - HASURA_GRAPHQL_DATABASE_URL=postgresql://user:password@postgresql:5432/repoflow
    depends_on:
      postgresql:
        condition: service_healthy
    networks:
      - repoflow-net

networks:
  repoflow-net:
    driver: bridge

volumes:
  minio-data:
  server-logs:
  postgresql-data:
  grype-db:
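
Before starting anything, you can ask Compose to validate and print the resolved configuration, which catches indentation and syntax mistakes early. Note that it requires ./secrets.env to exist (created in step 5), so either run it after that step or create an empty file first:

Command Line
docker compose config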

2. Create the Nginx Configuration

Create a file nginx/default.conf with the following content:

Nginx Config
map $http_upgrade $connection_upgrade {
    default upgrade;
    '' close;
}

map $request_uri $api_uri {
    ~^/api(?<rest>/.*)$ $rest;
    default $request_uri;
}

client_max_body_size 10G;

server {
    listen 8080 default_server;

    add_header X-Frame-Options "DENY";
    add_header Content-Security-Policy "frame-ancestors 'none';";

    location /v2/ {
        proxy_read_timeout 600s;
        proxy_request_buffering off;
        rewrite ^/v2/(.*)$ /v2/$1 break;
        proxy_pass http://server:3000;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location /api/ {
        proxy_read_timeout 600s;
        proxy_request_buffering off;
        proxy_pass http://server:3000$api_uri;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }

    location /hasura {
        proxy_pass http://hasura:8080/v1/graphql;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection $connection_upgrade;
    }

    location / {
        proxy_pass http://client:8080/;
        proxy_redirect off;
        proxy_set_header Host $host;
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
    }
}
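
For reference, the map $request_uri $api_uri block strips the /api prefix before requests reach the backend, while the /v2/ location (the Docker Registry API) passes paths through unchanged. An illustrative trace of how requests are routed (the /api/health path is a hypothetical example; /v2/_catalog is a standard registry endpoint):

Example
GET /api/health     ->  http://server:3000/health
GET /v2/_catalog    ->  http://server:3000/v2/_catalog
GET /hasura         ->  http://hasura:8080/v1/graphql  (with WebSocket upgrade support)
GET /anything-else  ->  http://client:8080/anything-else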

3. Create the Client Environment File

Create a file client/env.js with the following content:

Client Environment
window.HASURA_API_URL = "/hasura";
window.REPOFLOW_SERVER = "/api";
window.IS_PRINT_ENV = true;
window.DOCS_URL = "/docs";

4. Create the Analytics Script

Create a file client/analytics.js with your analytics script or leave the file empty:

Analytics Script
// Add your analytics script here.

5. Create the Secrets File

Create a file named secrets.env next to your docker-compose.yml.

This file contains all shared secrets used by RepoFlow, Hasura, and MinIO. You have two options:

  1. Recommended: Run the generator script below. It will create secrets.env with strong random values using openssl.
  2. Manual: Create secrets.env yourself with the same variables as the script below, replacing every placeholder with your own secure values.
generate_secrets.sh
#!/usr/bin/env bash
set -euo pipefail

S3_ACCESS_KEY="$(openssl rand -hex 16)"
S3_SECRET_KEY="$(openssl rand -hex 32)"
JWT_SECRET="$(openssl rand -hex 64)"
HASURA_ADMIN_SECRET="$(openssl rand -hex 32)"

cat > secrets.env <<EOF
# ======================
# RepoFlow secrets
# ======================

# Used for signing cookies
GENERAL_COOKIE_SECRET=$(openssl rand -hex 32)

# ======================
# RepoFlow server JWTs
# ======================

# MUST MATCH HASURA_GRAPHQL_JWT_SECRET.key
JWT_SECRET=$JWT_SECRET

RESET_PASSWORD_JWT_SECRET=$(openssl rand -hex 64)
PERSONAL_ACCESS_TOKEN_JWT_SECRET=$(openssl rand -hex 64)

# MUST MATCH HASURA_GRAPHQL_ADMIN_SECRET
HASURA_ADMIN_SECRET=$HASURA_ADMIN_SECRET

# ======================
# S3 / MinIO credentials
# These values MUST match
# ======================

S3_ACCESS_KEY=$S3_ACCESS_KEY
S3_SECRET_KEY=$S3_SECRET_KEY
MINIO_ROOT_USER=$S3_ACCESS_KEY
MINIO_ROOT_PASSWORD=$S3_SECRET_KEY

# ======================
# Hasura
# ======================

# MUST MATCH HASURA_ADMIN_SECRET
HASURA_GRAPHQL_ADMIN_SECRET=$HASURA_ADMIN_SECRET

# MUST MATCH JWT_SECRET
HASURA_GRAPHQL_JWT_SECRET={"type":"HS256","key":"$JWT_SECRET","header":{"type":"Cookie","name":"X-USER-TOKEN"}}
EOF

echo "Created secrets.env"

6. Start RepoFlow

Run the following command from your project directory:

Command Line
docker compose up -d

Note: the Compose file uses Compose Spec features such as the top-level name: field, so Docker Compose v2 (docker compose) is required; the legacy standalone docker-compose v1 binary may not parse it correctly.
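
Once the containers are up, you can check their status and follow the server logs while the stack initializes:

Command Line
docker compose ps
docker compose logs -f server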

7. Final File and Folder Structure

After completing the above steps, your project directory should look like this:

Project Structure
📁 RepoFlow/
├── docker-compose.yml
├── secrets.env
├── 📁 nginx/
│   └── default.conf
└── 📁 client/
    ├── env.js
    └── analytics.js
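
A quick way to confirm the layout from inside the project directory:

Command Line
find . -maxdepth 2 -type f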

8. Next Steps

  1. Access RepoFlow at http://localhost:9080 (see the reachability check after this list).
  2. Log in using the default credentials:
    • Username: admin
    • Password: password
  3. Follow the guides to create a workspace and add repositories.
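
If the page does not load, a simple check against the nginx entry point can help distinguish a network issue from an application issue (expect an HTTP status line rather than a connection error):

Command Line
curl -I http://localhost:9080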

Tip: For production-grade deployments, switch to Kubernetes with the Helm chart for better scalability and reliability.