Compare commits
10 Commits (72681217ef...v0.0.3)
| SHA1 |
|---|
| 1385736556 |
| 9faad8be6b |
| faa5dc0d9d |
| 0cc03aa145 |
| 12bdcab180 |
| 4069647495 |
| 53afea9352 |
| 58319b66ee |
| 1c5dc7f751 |
| b2d2252342 |
.gitea/workflows/ci.yml (new file, 240 lines)
@@ -0,0 +1,240 @@
name: CI

on:
  push:
    branches: [main]
  pull_request:
    branches: [main]

env:
  NTFY_URL: http://ntfy.observability.svc.cluster.local:80
  REGISTRY: gitea-http.gitea.svc.cluster.local:3000/daviestechlabs
  REGISTRY_HOST: gitea-http.gitea.svc.cluster.local:3000
  IMAGE_NAME: gradio-ui
  KUSTOMIZE_NAMESPACE: ai-ml

jobs:
  lint:
    name: Lint
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up uv
        run: curl -LsSf https://astral.sh/uv/install.sh | sh && echo "$HOME/.local/bin" >> $GITHUB_PATH

      - name: Run ruff check
        run: uvx ruff check .

      - name: Run ruff format check
        run: uvx ruff format --check .

  release:
    name: Release
    runs-on: ubuntu-latest
    needs: [lint]
    if: gitea.ref == 'refs/heads/main' && gitea.event_name == 'push'
    outputs:
      version: ${{ steps.version.outputs.version }}
    steps:
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Determine version bump
        id: version
        run: |
          LATEST=$(git describe --tags --abbrev=0 2>/dev/null || echo "v0.0.0")
          VERSION=${LATEST#v}
          IFS='.' read -r MAJOR MINOR PATCH <<< "$VERSION"

          MSG="${{ gitea.event.head_commit.message }}"
          if echo "$MSG" | grep -qiE "^major:|BREAKING CHANGE"; then
            MAJOR=$((MAJOR + 1)); MINOR=0; PATCH=0
            BUMP="major"
          elif echo "$MSG" | grep -qiE "^(minor:|feat:)"; then
            MINOR=$((MINOR + 1)); PATCH=0
            BUMP="minor"
          else
            PATCH=$((PATCH + 1))
            BUMP="patch"
          fi

          NEW_VERSION="v${MAJOR}.${MINOR}.${PATCH}"
          echo "version=$NEW_VERSION" >> $GITHUB_OUTPUT
          echo "bump=$BUMP" >> $GITHUB_OUTPUT
          echo "Bumping $LATEST → $NEW_VERSION ($BUMP)"

      - name: Create and push tag
        run: |
          git config user.name "gitea-actions[bot]"
          git config user.email "actions@git.daviestechlabs.io"
          git tag -a ${{ steps.version.outputs.version }} -m "Release ${{ steps.version.outputs.version }}"
          git push origin ${{ steps.version.outputs.version }}

  docker:
    name: Docker Build & Push
    runs-on: ubuntu-latest
    needs: [lint, release]
    if: gitea.ref == 'refs/heads/main' && gitea.event_name == 'push'
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
        with:
          buildkitd-config-inline: |
            [registry."gitea-http.gitea.svc.cluster.local:3000"]
              http = true
              insecure = true

      - name: Login to Docker Hub
        if: vars.DOCKERHUB_USERNAME != ''
        uses: docker/login-action@v3
        with:
          username: ${{ vars.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}

      - name: Configure Docker for insecure registry
        run: |
          sudo mkdir -p /etc/docker
          echo '{"insecure-registries": ["${{ env.REGISTRY_HOST }}"]}' | sudo tee /etc/docker/daemon.json
          sudo systemctl restart docker || sudo service docker restart || true
          sleep 2

      - name: Login to Gitea Registry
        run: |
          AUTH=$(echo -n "${{ secrets.REGISTRY_USER }}:${{ secrets.REGISTRY_TOKEN }}" | base64 -w0)
          mkdir -p ~/.docker
          cat > ~/.docker/config.json << EOF
          {
            "auths": {
              "${{ env.REGISTRY_HOST }}": {
                "auth": "$AUTH"
              }
            }
          }
          EOF
          echo "Auth configured for ${{ env.REGISTRY_HOST }}"

      - name: Extract metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}
          tags: |
            type=semver,pattern={{version}},value=${{ needs.release.outputs.version }}
            type=semver,pattern={{major}}.{{minor}},value=${{ needs.release.outputs.version }}
            type=raw,value=latest,enable={{is_default_branch}}

      - name: Build and push
        uses: docker/build-push-action@v5
        with:
          context: .
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          cache-from: type=gha
          cache-to: type=gha,mode=max

  deploy:
    name: Deploy to Kubernetes
    runs-on: ubuntu-latest
    needs: [docker, release]
    if: gitea.ref == 'refs/heads/main' && gitea.event_name == 'push'
    container:
      image: catthehacker/ubuntu:act-latest
      volumes:
        - /secrets/kubeconfig:/secrets/kubeconfig
    steps:
      - name: Checkout
        uses: actions/checkout@v4

      - name: Install kubectl
        run: |
          curl -LO "https://dl.k8s.io/release/$(curl -Ls https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
          chmod +x kubectl && sudo mv kubectl /usr/local/bin/

      - name: Update image tag in manifests
        env:
          KUBECONFIG: /secrets/kubeconfig/config
        run: |
          VERSION="${{ needs.release.outputs.version }}"
          VERSION="${VERSION#v}"
          for DEPLOY in llm embeddings stt tts; do
            sed -i "s|image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:.*|image: ${{ env.REGISTRY }}/${{ env.IMAGE_NAME }}:${VERSION}|" "${DEPLOY}.yaml"
          done

      - name: Apply kustomization
        env:
          KUBECONFIG: /secrets/kubeconfig/config
        run: |
          kubectl apply -k . --namespace ${{ env.KUSTOMIZE_NAMESPACE }}

      - name: Rollout restart deployments
        env:
          KUBECONFIG: /secrets/kubeconfig/config
        run: |
          for DEPLOY in llm-ui embeddings-ui stt-ui tts-ui; do
            kubectl rollout restart deployment/${DEPLOY} -n ${{ env.KUSTOMIZE_NAMESPACE }} 2>/dev/null || true
          done

      - name: Wait for rollout
        env:
          KUBECONFIG: /secrets/kubeconfig/config
        run: |
          for DEPLOY in llm-ui embeddings-ui stt-ui tts-ui; do
            kubectl rollout status deployment/${DEPLOY} -n ${{ env.KUSTOMIZE_NAMESPACE }} --timeout=120s 2>/dev/null || true
          done

  notify:
    name: Notify
    runs-on: ubuntu-latest
    needs: [lint, release, docker, deploy]
    if: always()
    steps:
      - name: Notify on success
        if: needs.lint.result == 'success' && needs.docker.result == 'success'
        run: |
          curl -s \
            -H "Title: ✅ CI Passed: ${{ gitea.repository }}" \
            -H "Priority: default" \
            -H "Tags: white_check_mark,github" \
            -H "Click: ${{ gitea.server_url }}/${{ gitea.repository }}/actions/runs/${{ gitea.run_id }}" \
            -d "Branch: ${{ gitea.ref_name }}
          Commit: ${{ gitea.event.head_commit.message || gitea.sha }}
          Release: ${{ needs.release.result == 'success' && needs.release.outputs.version || 'skipped' }}
          Docker: ${{ needs.docker.result }}
          Deploy: ${{ needs.deploy.result }}" \
            ${{ env.NTFY_URL }}/gitea-ci

      - name: Notify on deploy success
        if: needs.deploy.result == 'success'
        run: |
          curl -s \
            -H "Title: 🚀 Deployed: ${{ gitea.repository }}" \
            -H "Priority: default" \
            -H "Tags: rocket,kubernetes" \
            -H "Click: ${{ gitea.server_url }}/${{ gitea.repository }}/actions/runs/${{ gitea.run_id }}" \
            -d "Version: ${{ needs.release.outputs.version }}
          Namespace: ${{ env.KUSTOMIZE_NAMESPACE }}
          Apps: llm-ui, embeddings-ui, stt-ui, tts-ui" \
            ${{ env.NTFY_URL }}/gitea-ci

      - name: Notify on failure
        if: needs.lint.result == 'failure' || needs.docker.result == 'failure' || needs.deploy.result == 'failure'
        run: |
          curl -s \
            -H "Title: ❌ CI Failed: ${{ gitea.repository }}" \
            -H "Priority: high" \
            -H "Tags: x,github" \
            -H "Click: ${{ gitea.server_url }}/${{ gitea.repository }}/actions/runs/${{ gitea.run_id }}" \
            -d "Branch: ${{ gitea.ref_name }}
          Commit: ${{ gitea.event.head_commit.message || gitea.sha }}
          Lint: ${{ needs.lint.result }}
          Docker: ${{ needs.docker.result }}
          Deploy: ${{ needs.deploy.result }}" \
            ${{ env.NTFY_URL }}/gitea-ci
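The "Determine version bump" step above keys the semver bump off the head commit message. As a quick illustration only (the workflow itself does this in shell with grep; this Python re-expression is not part of the repo), the rule is:

```python
import re

def next_version(latest: str, commit_msg: str) -> str:
    """Mirror of the workflow's rule: 'major:' / 'BREAKING CHANGE' -> major,
    'minor:' / 'feat:' -> minor, anything else -> patch."""
    major, minor, patch = (int(x) for x in latest.lstrip("v").split("."))
    if re.search(r"^major:|BREAKING CHANGE", commit_msg, re.IGNORECASE | re.MULTILINE):
        major, minor, patch = major + 1, 0, 0
    elif re.search(r"^(minor:|feat:)", commit_msg, re.IGNORECASE | re.MULTILINE):
        minor, patch = minor + 1, 0
    else:
        patch += 1
    return f"v{major}.{minor}.{patch}"

assert next_version("v0.0.2", "fix: tidy logging") == "v0.0.3"
assert next_version("v0.0.3", "feat: add MLflow tracking") == "v0.1.0"
```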
embeddings.py (137 lines changed)
@@ -9,6 +9,7 @@ Features:
- MLflow metrics logging
- Visual embedding dimension display
"""

import os
import time
import logging
@@ -28,12 +29,77 @@ logger = logging.getLogger("embeddings-demo")
EMBEDDINGS_URL = os.environ.get(
    "EMBEDDINGS_URL",
    # Default: Ray Serve Embeddings endpoint
    "http://ai-inference-serve-svc.ai-ml.svc.cluster.local:8000/embeddings"
)
MLFLOW_TRACKING_URI = os.environ.get(
    "MLFLOW_TRACKING_URI",
    "http://mlflow.mlflow.svc.cluster.local:80"
    "http://ai-inference-serve-svc.ai-ml.svc.cluster.local:8000/embeddings",
)
# ─── MLflow experiment tracking ──────────────────────────────────────────
try:
    import mlflow
    from mlflow.tracking import MlflowClient

    MLFLOW_TRACKING_URI = os.environ.get(
        "MLFLOW_TRACKING_URI",
        "http://mlflow.mlflow.svc.cluster.local:80",
    )
    mlflow.set_tracking_uri(MLFLOW_TRACKING_URI)
    _mlflow_client = MlflowClient()

    _experiment = _mlflow_client.get_experiment_by_name("gradio-embeddings-tuning")
    if _experiment is None:
        _experiment_id = _mlflow_client.create_experiment(
            "gradio-embeddings-tuning",
            artifact_location="/mlflow/artifacts/gradio-embeddings-tuning",
        )
    else:
        _experiment_id = _experiment.experiment_id

    _mlflow_run = mlflow.start_run(
        experiment_id=_experiment_id,
        run_name=f"gradio-embeddings-{os.environ.get('HOSTNAME', 'local')}",
        tags={"service": "gradio-embeddings", "endpoint": EMBEDDINGS_URL},
    )
    _mlflow_run_id = _mlflow_run.info.run_id
    _mlflow_step = 0
    MLFLOW_ENABLED = True
    logger.info(
        "MLflow tracking enabled: experiment=%s run=%s", _experiment_id, _mlflow_run_id
    )
except Exception as exc:
    logger.warning("MLflow tracking disabled: %s", exc)
    _mlflow_client = None
    _mlflow_run_id = None
    _mlflow_step = 0
    MLFLOW_ENABLED = False


def _log_embedding_metrics(
    latency: float, batch_size: int, embedding_dims: int = 0
) -> None:
    """Log embedding inference metrics to MLflow (non-blocking best-effort)."""
    global _mlflow_step
    if not MLFLOW_ENABLED or _mlflow_client is None:
        return
    try:
        _mlflow_step += 1
        ts = int(time.time() * 1000)
        _mlflow_client.log_batch(
            _mlflow_run_id,
            metrics=[
                mlflow.entities.Metric("latency_s", latency, ts, _mlflow_step),
                mlflow.entities.Metric("batch_size", batch_size, ts, _mlflow_step),
                mlflow.entities.Metric(
                    "embedding_dims", embedding_dims, ts, _mlflow_step
                ),
                mlflow.entities.Metric(
                    "latency_per_text_ms",
                    (latency * 1000 / batch_size) if batch_size > 0 else 0,
                    ts,
                    _mlflow_step,
                ),
            ],
        )
    except Exception:
        logger.debug("MLflow log failed", exc_info=True)


# HTTP client
client = httpx.Client(timeout=60.0)
@@ -44,8 +110,7 @@ def get_embeddings(texts: list[str]) -> tuple[list[list[float]], float]:
    start_time = time.time()

    response = client.post(
        f"{EMBEDDINGS_URL}/embeddings",
        json={"input": texts, "model": "bge"}
        f"{EMBEDDINGS_URL}/embeddings", json={"input": texts, "model": "bge"}
    )
    response.raise_for_status()

@@ -77,8 +142,11 @@ def generate_single_embedding(text: str) -> tuple[str, str, str]:
    embedding = embeddings[0]
    dims = len(embedding)

    # Log to MLflow
    _log_embedding_metrics(latency, batch_size=1, embedding_dims=dims)

    # Format output
    status = f"✅ Generated {dims}-dimensional embedding in {latency*1000:.1f}ms"
    status = f"✅ Generated {dims}-dimensional embedding in {latency * 1000:.1f}ms"

    # Show first/last few dimensions
    preview = f"Dimensions: {dims}\n\n"
@@ -96,7 +164,7 @@ def generate_single_embedding(text: str) -> tuple[str, str, str]:
- Mean: {np.mean(embedding):.6f}
- Std: {np.std(embedding):.6f}
- L2 Norm: {np.linalg.norm(embedding):.6f}
- Latency: {latency*1000:.1f}ms
- Latency: {latency * 1000:.1f}ms
"""

    return status, preview, stats

@@ -119,6 +187,9 @@ def compare_texts(text1: str, text2: str) -> tuple[str, str]:

    similarity = cosine_similarity(embeddings[0], embeddings[1])

    # Log to MLflow
    _log_embedding_metrics(latency, batch_size=2, embedding_dims=len(embeddings[0]))

    # Determine similarity level
    if similarity > 0.9:
        level = "🟢 Very High"
@@ -141,14 +212,14 @@ def compare_texts(text1: str, text2: str) -> tuple[str, str]:
{desc}

---
*Computed in {latency*1000:.1f}ms*
*Computed in {latency * 1000:.1f}ms*
"""

    # Create a simple visual bar
    bar_length = 50
    filled = int(similarity * bar_length)
    bar = "█" * filled + "░" * (bar_length - filled)
    visual = f"[{bar}] {similarity*100:.1f}%"
    visual = f"[{bar}] {similarity * 100:.1f}%"

    return result, visual

@@ -167,8 +238,15 @@ def batch_embed(texts_input: str) -> tuple[str, str]:
    try:
        embeddings, latency = get_embeddings(texts)

        status = f"✅ Generated {len(embeddings)} embeddings in {latency*1000:.1f}ms"
        status += f" ({latency*1000/len(texts):.1f}ms per text)"
        # Log to MLflow
        _log_embedding_metrics(
            latency,
            batch_size=len(embeddings),
            embedding_dims=len(embeddings[0]) if embeddings else 0,
        )

        status = f"✅ Generated {len(embeddings)} embeddings in {latency * 1000:.1f}ms"
        status += f" ({latency * 1000 / len(texts):.1f}ms per text)"

        # Build similarity matrix
        n = len(embeddings)
@@ -181,11 +259,11 @@ def batch_embed(texts_input: str) -> tuple[str, str]:
            matrix.append(row)

        # Format as table
        header = "| | " + " | ".join([f"Text {i+1}" for i in range(n)]) + " |"
        header = "| | " + " | ".join([f"Text {i + 1}" for i in range(n)]) + " |"
        separator = "|---" + "|---" * n + "|"
        rows = []
        for i, row in enumerate(matrix):
            rows.append(f"| **Text {i+1}** | " + " | ".join(row) + " |")
            rows.append(f"| **Text {i + 1}** | " + " | ".join(row) + " |")

        table = "\n".join([header, separator] + rows)

@@ -198,7 +276,7 @@ def batch_embed(texts_input: str) -> tuple[str, str]:
**Texts processed:**
"""
        for i, text in enumerate(texts):
            result += f"\n{i+1}. {text[:50]}{'...' if len(text) > 50 else ''}"
            result += f"\n{i + 1}. {text[:50]}{'...' if len(text) > 50 else ''}"

        return status, result

@@ -243,7 +321,7 @@ Generate embeddings, compare text similarity, and explore vector representations
            single_input = gr.Textbox(
                label="Input Text",
                placeholder="Enter text to generate embeddings...",
                lines=3
                lines=3,
            )
            single_btn = gr.Button("Generate Embedding", variant="primary")

@@ -256,7 +334,7 @@ Generate embeddings, compare text similarity, and explore vector representations
            single_btn.click(
                fn=generate_single_embedding,
                inputs=single_input,
                outputs=[single_status, single_preview, single_stats]
                outputs=[single_status, single_preview, single_stats],
            )

        # Tab 2: Compare Texts
@@ -276,14 +354,17 @@ Generate embeddings, compare text similarity, and explore vector representations
            compare_btn.click(
                fn=compare_texts,
                inputs=[compare_text1, compare_text2],
                outputs=[compare_result, compare_visual]
                outputs=[compare_result, compare_visual],
            )

            # Example pairs
            gr.Examples(
                examples=[
                    ["The cat sat on the mat.", "A feline was resting on the rug."],
                    ["Machine learning is a subset of AI.", "Deep learning uses neural networks."],
                    [
                        "Machine learning is a subset of AI.",
                        "Deep learning uses neural networks.",
                    ],
                    ["I love pizza.", "The stock market crashed today."],
                ],
                inputs=[compare_text1, compare_text2],
@@ -291,21 +372,21 @@ Generate embeddings, compare text similarity, and explore vector representations

        # Tab 3: Batch Embeddings
        with gr.TabItem("📚 Batch Processing"):
            gr.Markdown("Generate embeddings for multiple texts and see their similarity matrix.")
            gr.Markdown(
                "Generate embeddings for multiple texts and see their similarity matrix."
            )

            batch_input = gr.Textbox(
                label="Texts (one per line)",
                placeholder="Enter multiple texts, one per line...",
                lines=6
                lines=6,
            )
            batch_btn = gr.Button("Process Batch", variant="primary")
            batch_status = gr.Textbox(label="Status", interactive=False)
            batch_result = gr.Markdown(label="Similarity Matrix")

            batch_btn.click(
                fn=batch_embed,
                inputs=batch_input,
                outputs=[batch_status, batch_result]
                fn=batch_embed, inputs=batch_input, outputs=[batch_status, batch_result]
            )

            gr.Examples(
@@ -320,8 +401,4 @@ Generate embeddings, compare text similarity, and explore vector representations


if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True
    )
    demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)
embeddings.yaml
@@ -20,7 +20,7 @@ spec:
    spec:
      containers:
        - name: gradio
          image: ghcr.io/billy-davies-2/llm-apps:v2-202602120526
          image: gitea-http.gitea.svc.cluster.local:3000/daviestechlabs/gradio-ui:latest
          imagePullPolicy: Always
          command: ["python", "embeddings.py"]
          ports:
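The MLflow blocks added in embeddings.py (and mirrored in llm.py, stt.py, and tts.py below) push per-request metrics with MlflowClient.log_batch into experiments such as gradio-embeddings-tuning. As a hedged sketch only, assuming the in-cluster tracking URI used above is reachable from where this runs (experiment and metric names are taken from the diff), the logged metrics could be read back like this:

```python
import mlflow
from mlflow.tracking import MlflowClient

mlflow.set_tracking_uri("http://mlflow.mlflow.svc.cluster.local:80")
client = MlflowClient()

# Experiment created by the Gradio embeddings demo at startup.
experiment = client.get_experiment_by_name("gradio-embeddings-tuning")
if experiment is not None:
    # Each Gradio pod keeps one long-lived run; grab the most recent one.
    runs = client.search_runs(
        [experiment.experiment_id], order_by=["attributes.start_time DESC"]
    )
    for run in runs[:1]:
        # Per-step latency values written by _log_embedding_metrics.
        for metric in client.get_metric_history(run.info.run_id, "latency_s"):
            print(run.info.run_id, metric.step, metric.value)
```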
llm.py (146 lines changed)
@@ -9,10 +9,10 @@ Features:
- Token usage and latency metrics
- Chat history management
"""

import os
import time
import logging
import json

import gradio as gr
import httpx
@@ -30,6 +30,92 @@ LLM_URL = os.environ.get(
    "http://ai-inference-serve-svc.ai-ml.svc.cluster.local:8000/llm",
)

# ─── MLflow experiment tracking ──────────────────────────────────────────
try:
    import mlflow
    from mlflow.tracking import MlflowClient

    MLFLOW_TRACKING_URI = os.environ.get(
        "MLFLOW_TRACKING_URI",
        "http://mlflow.mlflow.svc.cluster.local:80",
    )
    mlflow.set_tracking_uri(MLFLOW_TRACKING_URI)
    _mlflow_client = MlflowClient()

    # Ensure experiment exists
    _experiment = _mlflow_client.get_experiment_by_name("gradio-llm-tuning")
    if _experiment is None:
        _experiment_id = _mlflow_client.create_experiment(
            "gradio-llm-tuning",
            artifact_location="/mlflow/artifacts/gradio-llm-tuning",
        )
    else:
        _experiment_id = _experiment.experiment_id

    # One persistent run per Gradio instance
    _mlflow_run = mlflow.start_run(
        experiment_id=_experiment_id,
        run_name=f"gradio-llm-{os.environ.get('HOSTNAME', 'local')}",
        tags={
            "service": "gradio-llm",
            "endpoint": LLM_URL,
            "mlflow.runName": f"gradio-llm-{os.environ.get('HOSTNAME', 'local')}",
        },
    )
    _mlflow_run_id = _mlflow_run.info.run_id
    _mlflow_step = 0
    MLFLOW_ENABLED = True
    logger.info(
        "MLflow tracking enabled: experiment=%s run=%s", _experiment_id, _mlflow_run_id
    )
except Exception as exc:
    logger.warning("MLflow tracking disabled: %s", exc)
    _mlflow_client = None
    _mlflow_run_id = None
    _mlflow_step = 0
    MLFLOW_ENABLED = False


def _log_llm_metrics(
    latency: float,
    prompt_tokens: int,
    completion_tokens: int,
    temperature: float,
    max_tokens: int,
    top_p: float,
) -> None:
    """Log inference metrics to MLflow (non-blocking best-effort)."""
    global _mlflow_step
    if not MLFLOW_ENABLED or _mlflow_client is None:
        return
    try:
        _mlflow_step += 1
        ts = int(time.time() * 1000)
        total_tokens = prompt_tokens + completion_tokens
        tps = completion_tokens / latency if latency > 0 else 0
        _mlflow_client.log_batch(
            _mlflow_run_id,
            metrics=[
                mlflow.entities.Metric("latency_s", latency, ts, _mlflow_step),
                mlflow.entities.Metric(
                    "prompt_tokens", prompt_tokens, ts, _mlflow_step
                ),
                mlflow.entities.Metric(
                    "completion_tokens", completion_tokens, ts, _mlflow_step
                ),
                mlflow.entities.Metric("total_tokens", total_tokens, ts, _mlflow_step),
                mlflow.entities.Metric("tokens_per_second", tps, ts, _mlflow_step),
                mlflow.entities.Metric("temperature", temperature, ts, _mlflow_step),
                mlflow.entities.Metric(
                    "max_tokens_requested", max_tokens, ts, _mlflow_step
                ),
                mlflow.entities.Metric("top_p", top_p, ts, _mlflow_step),
            ],
        )
    except Exception:
        logger.debug("MLflow log failed", exc_info=True)


DEFAULT_SYSTEM_PROMPT = (
    "You are a helpful AI assistant running on Davies Tech Labs homelab infrastructure. "
    "You are powered by Llama 3.1 70B served via vLLM on AMD Strix Halo (ROCm). "
@@ -38,7 +124,7 @@ DEFAULT_SYSTEM_PROMPT = (

# Use async client for streaming
async_client = httpx.AsyncClient(timeout=httpx.Timeout(300.0, connect=30.0))
sync_client = httpx.Client(timeout=10.0)
sync_client = httpx.Client(timeout=httpx.Timeout(60.0, connect=10.0))


async def chat_stream(
@@ -90,6 +176,16 @@ async def chat_stream(
        usage.get("completion_tokens", 0),
    )

    # Log to MLflow
    _log_llm_metrics(
        latency=latency,
        prompt_tokens=usage.get("prompt_tokens", 0),
        completion_tokens=usage.get("completion_tokens", 0),
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
    )

    # Yield text progressively for a nicer streaming feel
    chunk_size = 4
    words = text.split(" ")
@@ -112,6 +208,13 @@ async def chat_stream(
def check_service_health() -> str:
    """Check if the LLM service is reachable."""
    try:
        # Try a lightweight GET against the Ray Serve base first.
        # This avoids burning GPU time on a full inference round-trip.
        base_url = LLM_URL.rsplit("/", 1)[0]  # strip /llm path
        response = sync_client.get(f"{base_url}/-/routes")
        if response.status_code == 200:
            return "🟢 LLM service is healthy"
        # Fall back to a minimal inference probe
        response = sync_client.post(
            LLM_URL,
            json={
@@ -125,6 +228,8 @@ def check_service_health() -> str:
        return f"🟡 LLM responded with status {response.status_code}"
    except httpx.ConnectError:
        return "🔴 Cannot connect to LLM service"
    except httpx.TimeoutException:
        return "🟡 LLM service is reachable but slow to respond"
    except Exception as e:
        return f"🔴 Service unavailable: {e}"

@@ -164,13 +269,23 @@ def single_prompt(
    text = result["choices"][0]["message"]["content"]
    usage = result.get("usage", {})

    # Log to MLflow
    _log_llm_metrics(
        latency=latency,
        prompt_tokens=usage.get("prompt_tokens", 0),
        completion_tokens=usage.get("completion_tokens", 0),
        temperature=temperature,
        max_tokens=max_tokens,
        top_p=top_p,
    )

    metrics = f"""
**Generation Metrics:**
- Latency: {latency:.1f}s
- Prompt tokens: {usage.get('prompt_tokens', 'N/A')}
- Completion tokens: {usage.get('completion_tokens', 'N/A')}
- Total tokens: {usage.get('total_tokens', 'N/A')}
- Model: {result.get('model', 'N/A')}
- Prompt tokens: {usage.get("prompt_tokens", "N/A")}
- Completion tokens: {usage.get("completion_tokens", "N/A")}
- Total tokens: {usage.get("total_tokens", "N/A")}
- Model: {result.get("model", "N/A")}
"""
    return text, metrics

@@ -218,18 +333,15 @@ Chat with **Llama 3.1 70B** (AWQ INT4) served via vLLM on AMD Strix Halo (ROCm).
        with gr.TabItem("💬 Chat"):
            chatbot = gr.ChatInterface(
                fn=chat_stream,
                type="messages",
                additional_inputs=[system_prompt, temperature, max_tokens, top_p],
                examples=[
                    "Hello! What can you tell me about yourself?",
                    "Explain how a GPU executes a matrix multiplication.",
                    "Write a Python function to compute the Fibonacci sequence.",
                    "What are the pros and cons of running LLMs on AMD GPUs?",
                    ["Hello! What can you tell me about yourself?"],
                    ["Explain how a GPU executes a matrix multiplication."],
                    ["Write a Python function to compute the Fibonacci sequence."],
                    ["What are the pros and cons of running LLMs on AMD GPUs?"],
                ],
                chatbot=gr.Chatbot(
                    height=520,
                    type="messages",
                    show_copy_button=True,
                    placeholder="Type a message to start chatting...",
                ),
            )
@@ -257,9 +369,13 @@ Chat with **Llama 3.1 70B** (AWQ INT4) served via vLLM on AMD Strix Halo (ROCm).

            gr.Examples(
                examples=[
                    ["Summarise the key differences between CUDA and ROCm for ML workloads."],
                    [
                        "Summarise the key differences between CUDA and ROCm for ML workloads."
                    ],
                    ["Write a haiku about Kubernetes."],
                    ["Explain Ray Serve in one paragraph for someone new to ML serving."],
                    [
                        "Explain Ray Serve in one paragraph for someone new to ML serving."
                    ],
                    ["List 5 creative uses for a homelab GPU cluster."],
                ],
                inputs=[prompt_input],
llm.yaml (4 lines changed)
@@ -20,7 +20,7 @@ spec:
    spec:
      containers:
        - name: gradio
          image: ghcr.io/billy-davies-2/llm-apps:v2-202602120526
          image: gitea-http.gitea.svc.cluster.local:3000/daviestechlabs/gradio-ui:latest
          imagePullPolicy: Always
          command: ["python", "llm.py"]
          ports:
@@ -53,7 +53,7 @@ spec:
            initialDelaySeconds: 5
            periodSeconds: 10
      imagePullSecrets:
        - name: ghcr-registry
        - name: gitea-registry
---
apiVersion: v1
kind: Service
renovate.json (new file, 7 lines)
@@ -0,0 +1,7 @@
{
  "$schema": "https://docs.renovatebot.com/renovate-schema.json",
  "extends": [
    "local>daviestechlabs/renovate-config",
    "local>daviestechlabs/renovate-config:python"
  ]
}
stt.py (146 lines changed)
@@ -9,11 +9,11 @@ Features:
- Translation mode
- MLflow metrics logging
"""

import os
import time
import logging
import io
import tempfile

import gradio as gr
import httpx
@@ -30,13 +30,82 @@ logger = logging.getLogger("stt-demo")
STT_URL = os.environ.get(
    "STT_URL",
    # Default: Ray Serve whisper endpoint
    "http://ai-inference-serve-svc.ai-ml.svc.cluster.local:8000/whisper"
    "http://ai-inference-serve-svc.ai-ml.svc.cluster.local:8000/whisper",
)
MLFLOW_TRACKING_URI = os.environ.get(
    "MLFLOW_TRACKING_URI",
    "http://mlflow.mlflow.svc.cluster.local:80"
    "MLFLOW_TRACKING_URI", "http://mlflow.mlflow.svc.cluster.local:80"
)

# ─── MLflow experiment tracking ──────────────────────────────────────────
try:
    import mlflow
    from mlflow.tracking import MlflowClient

    mlflow.set_tracking_uri(MLFLOW_TRACKING_URI)
    _mlflow_client = MlflowClient()

    _experiment = _mlflow_client.get_experiment_by_name("gradio-stt-tuning")
    if _experiment is None:
        _experiment_id = _mlflow_client.create_experiment(
            "gradio-stt-tuning",
            artifact_location="/mlflow/artifacts/gradio-stt-tuning",
        )
    else:
        _experiment_id = _experiment.experiment_id

    _mlflow_run = mlflow.start_run(
        experiment_id=_experiment_id,
        run_name=f"gradio-stt-{os.environ.get('HOSTNAME', 'local')}",
        tags={"service": "gradio-stt", "endpoint": STT_URL},
    )
    _mlflow_run_id = _mlflow_run.info.run_id
    _mlflow_step = 0
    MLFLOW_ENABLED = True
    logger.info(
        "MLflow tracking enabled: experiment=%s run=%s", _experiment_id, _mlflow_run_id
    )
except Exception as exc:
    logger.warning("MLflow tracking disabled: %s", exc)
    _mlflow_client = None
    _mlflow_run_id = None
    _mlflow_step = 0
    MLFLOW_ENABLED = False


def _log_stt_metrics(
    latency: float,
    audio_duration: float,
    word_count: int,
    task: str,
) -> None:
    """Log STT inference metrics to MLflow (non-blocking best-effort)."""
    global _mlflow_step
    if not MLFLOW_ENABLED or _mlflow_client is None:
        return
    try:
        _mlflow_step += 1
        ts = int(time.time() * 1000)
        rtf = latency / audio_duration if audio_duration > 0 else 0
        _mlflow_client.log_batch(
            _mlflow_run_id,
            metrics=[
                mlflow.entities.Metric("latency_s", latency, ts, _mlflow_step),
                mlflow.entities.Metric(
                    "audio_duration_s", audio_duration, ts, _mlflow_step
                ),
                mlflow.entities.Metric("realtime_factor", rtf, ts, _mlflow_step),
                mlflow.entities.Metric("word_count", word_count, ts, _mlflow_step),
            ],
            params=[]
            if _mlflow_step > 1
            else [
                mlflow.entities.Param("task", task),
            ],
        )
    except Exception:
        logger.debug("MLflow log failed", exc_info=True)


# HTTP client with longer timeout for transcription
client = httpx.Client(timeout=180.0)

@@ -63,9 +132,7 @@ LANGUAGES = {


def transcribe_audio(
    audio_input: tuple[int, np.ndarray] | str | None,
    language: str,
    task: str
    audio_input: tuple[int, np.ndarray] | str | None, language: str, task: str
) -> tuple[str, str, str]:
    """Transcribe audio using the Whisper STT service."""
    if audio_input is None:
@@ -81,12 +148,12 @@ def transcribe_audio(

        # Convert to WAV bytes
        audio_buffer = io.BytesIO()
        sf.write(audio_buffer, audio_data, sample_rate, format='WAV')
        sf.write(audio_buffer, audio_data, sample_rate, format="WAV")
        audio_bytes = audio_buffer.getvalue()
        audio_duration = len(audio_data) / sample_rate
    else:
        # File path
        with open(audio_input, 'rb') as f:
        with open(audio_input, "rb") as f:
            audio_bytes = f.read()
        # Get duration
        audio_data, sample_rate = sf.read(audio_input)
@@ -117,15 +184,25 @@ def transcribe_audio(
    text = result.get("text", "")
    detected_language = result.get("language", "unknown")

    # Log to MLflow
    _log_stt_metrics(
        latency=latency,
        audio_duration=audio_duration,
        word_count=len(text.split()),
        task=task,
    )

    # Status message
    status = f"✅ Transcribed {audio_duration:.1f}s of audio in {latency*1000:.0f}ms"
    status = (
        f"✅ Transcribed {audio_duration:.1f}s of audio in {latency * 1000:.0f}ms"
    )

    # Metrics
    metrics = f"""
**Transcription Statistics:**
- Audio Duration: {audio_duration:.2f} seconds
- Processing Time: {latency*1000:.0f}ms
- Real-time Factor: {latency/audio_duration:.2f}x
- Processing Time: {latency * 1000:.0f}ms
- Real-time Factor: {latency / audio_duration:.2f}x
- Detected Language: {detected_language}
- Task: {task}
- Word Count: {len(text.split())}
@@ -181,21 +258,19 @@ or file upload with support for 100+ languages.
        with gr.Row():
            with gr.Column():
                mic_input = gr.Audio(
                    label="Record Audio",
                    sources=["microphone"],
                    type="numpy"
                    label="Record Audio", sources=["microphone"], type="numpy"
                )

                with gr.Row():
                    mic_language = gr.Dropdown(
                        choices=list(LANGUAGES.keys()),
                        value="Auto-detect",
                        label="Language"
                        label="Language",
                    )
                    mic_task = gr.Radio(
                        choices=["Transcribe", "Translate to English"],
                        value="Transcribe",
                        label="Task"
                        label="Task",
                    )

                mic_btn = gr.Button("🎯 Transcribe", variant="primary")
@@ -204,15 +279,12 @@ or file upload with support for 100+ languages.
                mic_status = gr.Textbox(label="Status", interactive=False)
                mic_metrics = gr.Markdown(label="Metrics")

            mic_output = gr.Textbox(
                label="Transcription",
                lines=5
            )
            mic_output = gr.Textbox(label="Transcription", lines=5)

            mic_btn.click(
                fn=transcribe_audio,
                inputs=[mic_input, mic_language, mic_task],
                outputs=[mic_status, mic_output, mic_metrics]
                outputs=[mic_status, mic_output, mic_metrics],
            )

        # Tab 2: File Upload
@@ -220,21 +292,19 @@ or file upload with support for 100+ languages.
        with gr.Row():
            with gr.Column():
                file_input = gr.Audio(
                    label="Upload Audio File",
                    sources=["upload"],
                    type="filepath"
                    label="Upload Audio File", sources=["upload"], type="filepath"
                )

                with gr.Row():
                    file_language = gr.Dropdown(
                        choices=list(LANGUAGES.keys()),
                        value="Auto-detect",
                        label="Language"
                        label="Language",
                    )
                    file_task = gr.Radio(
                        choices=["Transcribe", "Translate to English"],
                        value="Transcribe",
                        label="Task"
                        label="Task",
                    )

                file_btn = gr.Button("🎯 Transcribe", variant="primary")
@@ -243,15 +313,12 @@ or file upload with support for 100+ languages.
                file_status = gr.Textbox(label="Status", interactive=False)
                file_metrics = gr.Markdown(label="Metrics")

            file_output = gr.Textbox(
                label="Transcription",
                lines=5
            )
            file_output = gr.Textbox(label="Transcription", lines=5)

            file_btn.click(
                fn=transcribe_audio,
                inputs=[file_input, file_language, file_task],
                outputs=[file_status, file_output, file_metrics]
                outputs=[file_status, file_output, file_metrics],
            )

            gr.Markdown("""
@@ -274,7 +341,7 @@ Whisper will automatically detect the source language.
            trans_input = gr.Audio(
                label="Audio Input",
                sources=["microphone", "upload"],
                type="numpy"
                type="numpy",
            )
            trans_btn = gr.Button("🌍 Translate to English", variant="primary")

@@ -282,10 +349,7 @@ Whisper will automatically detect the source language.
            trans_status = gr.Textbox(label="Status", interactive=False)
            trans_metrics = gr.Markdown(label="Metrics")

            trans_output = gr.Textbox(
                label="English Translation",
                lines=5
            )
            trans_output = gr.Textbox(label="English Translation", lines=5)

            def translate_audio(audio):
                return transcribe_audio(audio, "Auto-detect", "Translate to English")
@@ -293,15 +357,11 @@ Whisper will automatically detect the source language.
            trans_btn.click(
                fn=translate_audio,
                inputs=trans_input,
                outputs=[trans_status, trans_output, trans_metrics]
                outputs=[trans_status, trans_output, trans_metrics],
            )

    create_footer()


if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True
    )
    demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)
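Both stt.py above and tts.py below log a realtime_factor metric: processing latency divided by audio duration, so values below 1.0 mean the service runs faster than real time. A quick worked example (the numbers are hypothetical, only the formula comes from the diff):

```python
latency_s = 1.8         # hypothetical time the Whisper call took
audio_duration_s = 6.0  # hypothetical length of the transcribed clip

# Same computation as _log_stt_metrics / _log_tts_metrics.
rtf = latency_s / audio_duration_s
print(f"Real-time factor: {rtf:.2f}x")  # 0.30x, i.e. ~3.3x faster than real time
```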
stt.yaml (2 lines changed)
@@ -20,7 +20,7 @@ spec:
    spec:
      containers:
        - name: gradio
          image: ghcr.io/billy-davies-2/llm-apps:v2-202602120526
          image: gitea-http.gitea.svc.cluster.local:3000/daviestechlabs/gradio-ui:latest
          imagePullPolicy: Always
          command: ["python", "stt.py"]
          ports:
theme.py (8 lines changed)
@@ -3,6 +3,7 @@ Shared Gradio theme for Davies Tech Labs AI demos.
Consistent styling across all demo applications.
Cyberpunk aesthetic - dark with yellow/gold accents.
"""

import gradio as gr


@@ -25,7 +26,12 @@ def get_lab_theme() -> gr.Theme:
        primary_hue=gr.themes.colors.yellow,
        secondary_hue=gr.themes.colors.amber,
        neutral_hue=gr.themes.colors.zinc,
        font=[gr.themes.GoogleFont("Space Grotesk"), "ui-sans-serif", "system-ui", "sans-serif"],
        font=[
            gr.themes.GoogleFont("Space Grotesk"),
            "ui-sans-serif",
            "system-ui",
            "sans-serif",
        ],
        font_mono=[gr.themes.GoogleFont("JetBrains Mono"), "ui-monospace", "monospace"],
    ).set(
        # Background colors
tts.py (151 lines changed)
@@ -9,11 +9,11 @@ Features:
- MLflow metrics logging
- Multiple TTS backends support (Coqui XTTS, Piper, etc.)
"""

import os
import time
import logging
import io
import base64

import gradio as gr
import httpx
@@ -30,13 +30,79 @@ logger = logging.getLogger("tts-demo")
TTS_URL = os.environ.get(
    "TTS_URL",
    # Default: Ray Serve TTS endpoint
    "http://ai-inference-serve-svc.ai-ml.svc.cluster.local:8000/tts"
    "http://ai-inference-serve-svc.ai-ml.svc.cluster.local:8000/tts",
)
MLFLOW_TRACKING_URI = os.environ.get(
    "MLFLOW_TRACKING_URI",
    "http://mlflow.mlflow.svc.cluster.local:80"
    "MLFLOW_TRACKING_URI", "http://mlflow.mlflow.svc.cluster.local:80"
)

# ─── MLflow experiment tracking ──────────────────────────────────────────
try:
    import mlflow
    from mlflow.tracking import MlflowClient

    mlflow.set_tracking_uri(MLFLOW_TRACKING_URI)
    _mlflow_client = MlflowClient()

    _experiment = _mlflow_client.get_experiment_by_name("gradio-tts-tuning")
    if _experiment is None:
        _experiment_id = _mlflow_client.create_experiment(
            "gradio-tts-tuning",
            artifact_location="/mlflow/artifacts/gradio-tts-tuning",
        )
    else:
        _experiment_id = _experiment.experiment_id

    _mlflow_run = mlflow.start_run(
        experiment_id=_experiment_id,
        run_name=f"gradio-tts-{os.environ.get('HOSTNAME', 'local')}",
        tags={"service": "gradio-tts", "endpoint": TTS_URL},
    )
    _mlflow_run_id = _mlflow_run.info.run_id
    _mlflow_step = 0
    MLFLOW_ENABLED = True
    logger.info(
        "MLflow tracking enabled: experiment=%s run=%s", _experiment_id, _mlflow_run_id
    )
except Exception as exc:
    logger.warning("MLflow tracking disabled: %s", exc)
    _mlflow_client = None
    _mlflow_run_id = None
    _mlflow_step = 0
    MLFLOW_ENABLED = False


def _log_tts_metrics(
    latency: float,
    audio_duration: float,
    text_chars: int,
    language: str,
) -> None:
    """Log TTS inference metrics to MLflow (non-blocking best-effort)."""
    global _mlflow_step
    if not MLFLOW_ENABLED or _mlflow_client is None:
        return
    try:
        _mlflow_step += 1
        ts = int(time.time() * 1000)
        rtf = latency / audio_duration if audio_duration > 0 else 0
        cps = text_chars / latency if latency > 0 else 0
        _mlflow_client.log_batch(
            _mlflow_run_id,
            metrics=[
                mlflow.entities.Metric("latency_s", latency, ts, _mlflow_step),
                mlflow.entities.Metric(
                    "audio_duration_s", audio_duration, ts, _mlflow_step
                ),
                mlflow.entities.Metric("realtime_factor", rtf, ts, _mlflow_step),
                mlflow.entities.Metric("chars_per_second", cps, ts, _mlflow_step),
                mlflow.entities.Metric("text_chars", text_chars, ts, _mlflow_step),
            ],
        )
    except Exception:
        logger.debug("MLflow log failed", exc_info=True)


# HTTP client with longer timeout for audio generation
client = httpx.Client(timeout=120.0)

@@ -61,7 +127,9 @@ LANGUAGES = {
}


def synthesize_speech(text: str, language: str) -> tuple[str, tuple[int, np.ndarray] | None, str]:
def synthesize_speech(
    text: str, language: str
) -> tuple[str, tuple[int, np.ndarray] | None, str]:
    """Synthesize speech from text using the TTS service."""
    if not text.strip():
        return "❌ Please enter some text", None, ""
@@ -73,8 +141,7 @@ def synthesize_speech(text: str, language: str) -> tuple[str, tuple[int, np.ndar

    # Call TTS service (Coqui XTTS API format)
    response = client.get(
        f"{TTS_URL}/api/tts",
        params={"text": text, "language_id": lang_code}
        f"{TTS_URL}/api/tts", params={"text": text, "language_id": lang_code}
    )
    response.raise_for_status()

@@ -92,7 +159,15 @@ def synthesize_speech(text: str, language: str) -> tuple[str, tuple[int, np.ndar
    duration = len(audio_data) / sample_rate

    # Status message
    status = f"✅ Generated {duration:.2f}s of audio in {latency*1000:.0f}ms"
    status = f"✅ Generated {duration:.2f}s of audio in {latency * 1000:.0f}ms"

    # Log to MLflow
    _log_tts_metrics(
        latency=latency,
        audio_duration=duration,
        text_chars=len(text),
        language=lang_code,
    )

    # Metrics
    metrics = f"""
@@ -100,11 +175,11 @@ def synthesize_speech(text: str, language: str) -> tuple[str, tuple[int, np.ndar
- Duration: {duration:.2f} seconds
- Sample Rate: {sample_rate} Hz
- Size: {len(audio_bytes) / 1024:.1f} KB
- Generation Time: {latency*1000:.0f}ms
- Real-time Factor: {latency/duration:.2f}x
- Generation Time: {latency * 1000:.0f}ms
- Real-time Factor: {latency / duration:.2f}x
- Language: {language} ({lang_code})
- Characters: {len(text)}
- Chars/sec: {len(text)/latency:.1f}
- Chars/sec: {len(text) / latency:.1f}
"""

    return status, (sample_rate, audio_data), metrics
@@ -160,16 +235,18 @@ in multiple languages.
                label="Text to Synthesize",
                placeholder="Enter text to convert to speech...",
                lines=5,
                max_lines=10
                max_lines=10,
            )

            with gr.Row():
                language = gr.Dropdown(
                    choices=list(LANGUAGES.keys()),
                    value="English",
                    label="Language"
                    label="Language",
                )
                synthesize_btn = gr.Button(
                    "🔊 Synthesize", variant="primary", scale=2
                )
                synthesize_btn = gr.Button("🔊 Synthesize", variant="primary", scale=2)

        with gr.Column(scale=1):
            status_output = gr.Textbox(label="Status", interactive=False)
@@ -180,15 +257,24 @@ in multiple languages.
        synthesize_btn.click(
            fn=synthesize_speech,
            inputs=[text_input, language],
            outputs=[status_output, audio_output, metrics_output]
            outputs=[status_output, audio_output, metrics_output],
        )

        # Example texts
        gr.Examples(
            examples=[
                ["Hello! Welcome to Davies Tech Labs. This is a demonstration of our text-to-speech system.", "English"],
                ["The quick brown fox jumps over the lazy dog. This sentence contains every letter of the alphabet.", "English"],
                ["Bonjour! Bienvenue au laboratoire technique de Davies.", "French"],
                [
                    "Hello! Welcome to Davies Tech Labs. This is a demonstration of our text-to-speech system.",
                    "English",
                ],
                [
                    "The quick brown fox jumps over the lazy dog. This sentence contains every letter of the alphabet.",
                    "English",
                ],
                [
                    "Bonjour! Bienvenue au laboratoire technique de Davies.",
                    "French",
                ],
                ["Hola! Bienvenido al laboratorio de tecnología.", "Spanish"],
                ["Guten Tag! Willkommen im Techniklabor.", "German"],
            ],
@@ -200,14 +286,16 @@ in multiple languages.
        gr.Markdown("Compare the same text in different languages.")

        compare_text = gr.Textbox(
            label="Text to Compare",
            value="Hello, how are you today?",
            lines=2
            label="Text to Compare", value="Hello, how are you today?", lines=2
        )

        with gr.Row():
            lang1 = gr.Dropdown(choices=list(LANGUAGES.keys()), value="English", label="Language 1")
            lang2 = gr.Dropdown(choices=list(LANGUAGES.keys()), value="Spanish", label="Language 2")
            lang1 = gr.Dropdown(
                choices=list(LANGUAGES.keys()), value="English", label="Language 1"
            )
            lang2 = gr.Dropdown(
                choices=list(LANGUAGES.keys()), value="Spanish", label="Language 2"
            )

        compare_btn = gr.Button("Compare Languages", variant="primary")

@@ -230,7 +318,7 @@ in multiple languages.
        compare_btn.click(
            fn=compare_languages,
            inputs=[compare_text, lang1, lang2],
            outputs=[status1, audio1, status2, audio2]
            outputs=[status1, audio1, status2, audio2],
        )

        # Tab 3: Batch Processing
@@ -240,19 +328,16 @@ in multiple languages.
        batch_input = gr.Textbox(
            label="Texts (one per line)",
            placeholder="Enter multiple texts, one per line...",
            lines=6
            lines=6,
        )
        batch_lang = gr.Dropdown(
            choices=list(LANGUAGES.keys()),
            value="English",
            label="Language"
            choices=list(LANGUAGES.keys()), value="English", label="Language"
        )
        batch_btn = gr.Button("Synthesize All", variant="primary")

        batch_status = gr.Textbox(label="Status", interactive=False)
        batch_audios = gr.Dataset(
            components=[gr.Audio(type="numpy")],
            label="Generated Audio Files"
            components=[gr.Audio(type="numpy")], label="Generated Audio Files"
        )

        # Note: Batch processing would need more complex handling
@@ -266,8 +351,4 @@ or the Kubeflow pipeline for better throughput.*


if __name__ == "__main__":
    demo.launch(
        server_name="0.0.0.0",
        server_port=7860,
        show_error=True
    )
    demo.launch(server_name="0.0.0.0", server_port=7860, show_error=True)