AI Art Pipeline with Python
Generative AI has become one of the most powerful new tools available to artists and technical artists alike. In this project you'll build a complete AI art pipeline - from single image generation to batch processing to a reusable prompt manager - all driven by Python. By the end, you'll have a portfolio-ready tool that demonstrates both your coding skills and your creative problem-solving.
Overview of AI Art Tools for TAs
As a technical artist, your role isn't to replace concept artists - it's to build systems that accelerate creative workflows. AI image generators like Stable Diffusion, DALL·E, and Midjourney are powerful, but their real value appears when you wrap them in automation:
- Texture generation - create seamless tiling textures on demand.
- Concept art exploration - rapidly iterate on visual ideas before committing to production art.
- Variation generation - produce dozens of colour, lighting, or style variations from a single base prompt.
- Moodboard assembly - auto-generate reference sheets for art direction.
This guide uses the Stable Diffusion WebUI API (AUTOMATIC1111) running locally. You can adapt the same patterns for cloud APIs like the Stability AI REST API or OpenAI's DALL·E endpoint.
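Once the WebUI is running with --api, a tiny script can confirm the server is actually reachable before you write any generation code. This is a minimal sketch, assuming the default port and using the requests package you'll install in the next section; the sd-models route used here is one of the WebUI's read-only listing endpoints, but any GET route under /sdapi/v1/ would do for the check.
"""check_api.py - Confirm the local Stable Diffusion WebUI API is reachable."""
import requests

def api_is_up(base_url: str = "http://127.0.0.1:7860") -> bool:
    """Return True if the WebUI answers on one of its API routes."""
    try:
        response = requests.get(f"{base_url}/sdapi/v1/sd-models", timeout=5)
        return response.ok
    except requests.RequestException:
        return False

if __name__ == "__main__":
    print("WebUI API reachable:", api_is_up())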
Setting Up Your Python Environment
Create a dedicated virtual environment so your AI art dependencies stay isolated from your other projects.
# setup_env.py - Run once to bootstrap your project
import subprocess
import sys
import os
def create_environment(env_name="ai_art_env"):
"""Create a virtual environment and install dependencies."""
venv_path = os.path.join(os.getcwd(), env_name)
if not os.path.exists(venv_path):
print(f"Creating virtual environment: {env_name}")
subprocess.check_call([sys.executable, "-m", "venv", env_name])
# Determine the Python interpreter inside the venv so packages install via "python -m pip"
# (upgrading pip through pip.exe fails on Windows because the executable can't replace itself)
if os.name == "nt":
    venv_python = os.path.join(venv_path, "Scripts", "python.exe")
else:
    venv_python = os.path.join(venv_path, "bin", "python")
packages = [
"requests", # HTTP calls to the SD API
"Pillow", # Image manipulation
"tqdm", # Progress bars for batch jobs
]
print("Installing packages...")
subprocess.check_call([venv_python, "-m", "pip", "install", "--upgrade", "pip"])
subprocess.check_call([venv_python, "-m", "pip", "install"] + packages)
print("Environment ready.")
if __name__ == "__main__":
create_environment()
If you're using the Stability AI cloud API instead of a local server, add "stability-sdk" to the packages list above.
Basic Image Generation with the Stable Diffusion API
The AUTOMATIC1111 WebUI exposes a REST API on http://127.0.0.1:7860 when launched with the --api flag. The /sdapi/v1/txt2img endpoint accepts a JSON payload and returns a base64-encoded image.
"""generate_image.py - Generate a single image from a text prompt."""
import requests
import base64
import io
from pathlib import Path
from PIL import Image
SD_API_URL = "http://127.0.0.1:7860/sdapi/v1/txt2img"
def generate_image(
prompt: str,
negative_prompt: str = "blurry, low quality, watermark",
width: int = 512,
height: int = 512,
steps: int = 30,
cfg_scale: float = 7.0,
seed: int = -1,
) -> Image.Image:
"""Send a txt2img request and return a PIL Image."""
payload = {
"prompt": prompt,
"negative_prompt": negative_prompt,
"width": width,
"height": height,
"steps": steps,
"cfg_scale": cfg_scale,
"seed": seed,
}
response = requests.post(SD_API_URL, json=payload, timeout=120)
response.raise_for_status()
# The API returns images as base64 strings inside a JSON array
img_data = base64.b64decode(response.json()["images"][0])
return Image.open(io.BytesIO(img_data))
def save_image(image: Image.Image, output_dir: str, filename: str) -> Path:
"""Save a PIL Image to disk and return the file path."""
out = Path(output_dir)
out.mkdir(parents=True, exist_ok=True)
filepath = out / filename
image.save(filepath)
print(f"Saved: {filepath}")
return filepath
if __name__ == "__main__":
prompt = "fantasy sword, game asset, painted metal, studio lighting, 4k"
img = generate_image(prompt)
save_image(img, "output", "sword_concept.png")
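With seed=-1 the server picks a random seed for every request, so a great result can be hard to reproduce. The WebUI's response also carries an info field describing the generation; the sketch below pulls the actual seed back out so you can reuse it. This assumes the default response format, where info is a JSON-encoded string containing the generation parameters; the script name and output filename are just illustrative.
"""get_used_seed.py - Recover the seed the server actually used for a generation."""
import base64
import io
import json
import requests
from PIL import Image

SD_API_URL = "http://127.0.0.1:7860/sdapi/v1/txt2img"

def generate_and_report_seed(prompt: str) -> tuple[Image.Image, int]:
    """Generate one image and return it together with the seed reported by the server."""
    payload = {"prompt": prompt, "steps": 30, "seed": -1}
    response = requests.post(SD_API_URL, json=payload, timeout=120)
    response.raise_for_status()
    data = response.json()
    # Assumption: "info" is a JSON string holding the parameters, including the resolved seed
    info = json.loads(data["info"])
    image = Image.open(io.BytesIO(base64.b64decode(data["images"][0])))
    return image, info.get("seed", -1)

if __name__ == "__main__":
    img, seed = generate_and_report_seed("fantasy sword, game asset, studio lighting")
    print(f"Server used seed {seed} - pass it back in to regenerate this image.")
    img.save(f"sword_seed_{seed}.png")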
Batch Generation of Variations
Generating one image is useful for testing, but the real power comes from batch processing. The script below takes a list of prompts, generates multiple seed variations for each, and saves everything into an organised folder structure.
"""batch_generate.py - Generate multiple variations for a list of prompts."""
import json
import time
from pathlib import Path
from tqdm import tqdm
from generate_image import generate_image, save_image
def slugify(text: str) -> str:
"""Convert a prompt into a filesystem-safe folder name."""
return "".join(c if c.isalnum() or c in "-_ " else "" for c in text)[:60].strip().replace(" ", "_")
def batch_generate(
prompts: list[str],
seeds: list[int],
output_root: str = "batch_output",
delay: float = 0.5,
) -> list[Path]:
"""Generate len(prompts) × len(seeds) images."""
saved_files = []
for prompt in tqdm(prompts, desc="Prompts"):
folder_name = slugify(prompt)
for seed in seeds:
img = generate_image(prompt, seed=seed)
filepath = save_image(
img,
output_dir=str(Path(output_root) / folder_name),
filename=f"seed_{seed}.png",
)
saved_files.append(filepath)
time.sleep(delay) # avoid hammering the API
return saved_files
if __name__ == "__main__":
prompts = [
"sci-fi corridor, neon lights, cyberpunk, cinematic",
"medieval castle gate, mossy stone, overcast sky, matte painting",
"crystal cave, volumetric light, fantasy, concept art",
]
seeds = [42, 123, 7777, 99999]
results = batch_generate(prompts, seeds)
print(f"\nGenerated {len(results)} images.")
# Save a manifest for pipeline integration, grouping files by the prompt that produced them
manifest = [{"prompt": p, "files": [str(f) for f in results if Path(f).parent.name == slugify(p)]} for p in prompts]
Path("batch_output/manifest.json").write_text(json.dumps(manifest, indent=2))
print("Manifest written to batch_output/manifest.json")
Batch jobs can be slow on consumer GPUs. A 30-step generation at 512×512 takes roughly 5-15 seconds depending on your hardware. Plan your seed lists accordingly.
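Once a batch finishes, it takes very little extra code to turn the results into a quick reference sheet - the "moodboard assembly" use case from the overview. The sketch below uses only Pillow; the script name, grid size, and thumbnail size are illustrative choices you'd tune to your own output.
"""contact_sheet.py - Assemble batch results into a single reference sheet."""
from pathlib import Path
from PIL import Image

def build_contact_sheet(image_dir: str, columns: int = 4, thumb_size: int = 256) -> Image.Image:
    """Paste every PNG under image_dir into a simple grid."""
    paths = sorted(p for p in Path(image_dir).rglob("*.png") if p.name != "contact_sheet.png")
    if not paths:
        raise ValueError(f"No PNG files found under {image_dir}")
    rows = (len(paths) + columns - 1) // columns
    sheet = Image.new("RGB", (columns * thumb_size, rows * thumb_size), "black")
    for i, path in enumerate(paths):
        thumb = Image.open(path).convert("RGB").resize((thumb_size, thumb_size))
        x = (i % columns) * thumb_size
        y = (i // columns) * thumb_size
        sheet.paste(thumb, (x, y))
    return sheet

if __name__ == "__main__":
    sheet = build_contact_sheet("batch_output")
    sheet.save("batch_output/contact_sheet.png")
    print("Contact sheet saved to batch_output/contact_sheet.png")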
Building a Prompt Manager Tool
Prompt engineering is half the battle with generative AI. A prompt manager lets you store, tag, search, and combine prompt fragments so you can iterate quickly without rewriting from scratch every time.
"""prompt_manager.py - Store, search, and compose prompts from reusable fragments."""
import json
from pathlib import Path
from dataclasses import dataclass, field, asdict
LIBRARY_PATH = Path("prompt_library.json")
@dataclass
class PromptFragment:
text: str
tags: list[str] = field(default_factory=list)
category: str = "general"
class PromptManager:
"""Manage a library of reusable prompt fragments."""
def __init__(self, library_path: Path = LIBRARY_PATH):
self.library_path = library_path
self.fragments: list[PromptFragment] = []
self._load()
# -- Persistence ----------------------------------------------------------
def _load(self):
if self.library_path.exists():
data = json.loads(self.library_path.read_text())
self.fragments = [PromptFragment(**item) for item in data]
def save(self):
self.library_path.write_text(
json.dumps([asdict(f) for f in self.fragments], indent=2)
)
# -- CRUD -----------------------------------------------------------------
def add(self, text: str, tags: list[str] | None = None, category: str = "general"):
fragment = PromptFragment(text=text, tags=tags or [], category=category)
self.fragments.append(fragment)
self.save()
return fragment
def search(self, query: str) -> list[PromptFragment]:
query_lower = query.lower()
return [
f for f in self.fragments
if query_lower in f.text.lower()
or any(query_lower in tag.lower() for tag in f.tags)
]
def by_category(self, category: str) -> list[PromptFragment]:
return [f for f in self.fragments if f.category == category]
# -- Composition ----------------------------------------------------------
def compose(self, fragments: list[PromptFragment], separator: str = ", ") -> str:
"""Join multiple fragments into a single prompt string."""
return separator.join(f.text for f in fragments)
if __name__ == "__main__":
pm = PromptManager()
# Seed the library with useful fragments
pm.add("4k, highly detailed, sharp focus", tags=["quality"], category="quality")
pm.add("studio lighting, soft shadows", tags=["lighting"], category="lighting")
pm.add("fantasy sword, ornate hilt, glowing runes", tags=["weapon", "fantasy"], category="subject")
pm.add("sci-fi helmet, visor, carbon fiber", tags=["sci-fi", "armor"], category="subject")
pm.add("painted metal texture, worn edges", tags=["texture", "metal"], category="style")
pm.add("concept art, matte painting style", tags=["concept"], category="style")
# Search and compose
subjects = pm.search("fantasy")
quality = pm.by_category("quality")
style = pm.by_category("style")
full_prompt = pm.compose(subjects + quality + style[:1])
print(f"Composed prompt:\n{full_prompt}")
# Output: fantasy sword, ornate hilt, glowing runes, 4k, highly detailed,
# sharp focus, painted metal texture, worn edges
Store your prompt library in version control. Over time it becomes a valuable asset - a curated collection of prompts tuned to your studio's art style.
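The real payoff comes when the library feeds the batch generator directly, so composed prompts go straight into a generation run. Here's a small sketch, assuming prompt_manager.py and batch_generate.py sit in the same folder and the library has been seeded as above; the script name and seed list are illustrative.
"""compose_and_generate.py - Drive batch generation from the prompt library."""
from batch_generate import batch_generate
from prompt_manager import PromptManager

pm = PromptManager()
# Shared quality suffix composed once, appended to every subject fragment
quality = pm.compose(pm.by_category("quality"))
prompts = [f"{subject.text}, {quality}" for subject in pm.by_category("subject")]

batch_generate(prompts, seeds=[42, 123])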
Integrating AI Outputs into a Pipeline
Raw AI outputs rarely go straight into production. The script below shows a realistic post-processing pipeline: resize to a power-of-two resolution, convert to a consistent colour space, and optionally blend a texture's edges so it tiles, before writing the result to a project's asset folder.
"""pipeline.py - Post-process AI images for production use."""
from pathlib import Path
from PIL import Image, ImageFilter
import json
def nearest_power_of_two(value: int) -> int:
"""Round up to the nearest power of two."""
power = 1
while power < value:
power *= 2
return power
def resize_to_pot(image: Image.Image) -> Image.Image:
"""Resize an image so both dimensions are powers of two."""
new_w = nearest_power_of_two(image.width)
new_h = nearest_power_of_two(image.height)
return image.resize((new_w, new_h), Image.LANCZOS)
def make_seamless_tile(image: Image.Image, blend_margin: int = 64) -> Image.Image:
"""Create a simple seamless tile by blending opposite edges."""
w, h = image.size
result = image.copy()
# Blend left-right edges
for x in range(blend_margin):
alpha = x / blend_margin
for y in range(h):
left = image.getpixel((x, y))
right = image.getpixel((w - blend_margin + x, y))
blended = tuple(int(l * alpha + r * (1 - alpha)) for l, r in zip(left, right))
result.putpixel((x, y), blended)
return result
def process_for_production(
input_path: str,
output_dir: str,
target_format: str = "PNG",
make_tileable: bool = False,
) -> dict:
"""Full pipeline: load, resize, optionally tile, and save."""
img = Image.open(input_path).convert("RGB")
metadata = {"source": input_path, "original_size": list(img.size)}
img = resize_to_pot(img)
metadata["pot_size"] = list(img.size)
if make_tileable:
img = make_seamless_tile(img)
metadata["tileable"] = True
out_dir = Path(output_dir)
out_dir.mkdir(parents=True, exist_ok=True)
out_name = Path(input_path).stem + f"_production.{target_format.lower()}"
out_path = out_dir / out_name
img.save(out_path, format=target_format)
metadata["output"] = str(out_path)
return metadata
if __name__ == "__main__":
# Process every PNG in the batch output folder
batch_dir = Path("batch_output")
results = []
for png in batch_dir.rglob("*.png"):
meta = process_for_production(
str(png),
output_dir="production_assets",
make_tileable=True,
)
results.append(meta)
print(f"Processed: {meta['output']}")
# Write a pipeline report
report_path = Path("production_assets/pipeline_report.json")
report_path.write_text(json.dumps(results, indent=2))
print(f"\nPipeline report: {report_path}")
For texture work, you'll often want to run generated images through additional steps - normal map generation, roughness extraction, or colour grading. Libraries like opencv-python and numpy are excellent for this. The pattern above is designed to be extended with extra processing stages.
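As one example of an extra stage, the sketch below derives a rough tangent-space normal map from a greyscale read of a texture, using numpy (which you would add to your environment) and Pillow. The strength value, the input path, and the green-channel convention are all assumptions you'd match to your engine - some engines expect the Y axis flipped.
"""normal_from_height.py - Derive a rough tangent-space normal map from a height/greyscale image."""
from pathlib import Path
import numpy as np
from PIL import Image

def height_to_normal(height_img: Image.Image, strength: float = 2.0) -> Image.Image:
    """Approximate a normal map from luminance gradients of the input image."""
    h = np.asarray(height_img.convert("L"), dtype=np.float32) / 255.0
    # Central-difference gradients, wrapping at the edges so tiling textures stay seamless
    dx = (np.roll(h, -1, axis=1) - np.roll(h, 1, axis=1)) * strength
    dy = (np.roll(h, -1, axis=0) - np.roll(h, 1, axis=0)) * strength
    dz = np.ones_like(h)
    length = np.sqrt(dx * dx + dy * dy + dz * dz)
    normal = np.stack([-dx / length, -dy / length, dz / length], axis=-1)
    # Remap from [-1, 1] to [0, 255] RGB
    rgb = ((normal * 0.5 + 0.5) * 255).astype(np.uint8)
    return Image.fromarray(rgb, mode="RGB")

if __name__ == "__main__":
    # Illustrative path - point this at one of the files written by pipeline.py
    src = Path("production_assets") / "seed_42_production.png"
    height_to_normal(Image.open(src)).save(src.with_name(src.stem + "_normal.png"))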
Portfolio Presentation Tips
A strong portfolio entry for an AI art pipeline project should include:
- Before / After comparisons - show the raw AI output next to your post-processed production asset.
- Code snippets with commentary - highlight the most interesting technical decisions you made.
- A short video or GIF - demonstrate the tool in action: run the batch generator, show the prompt manager composing prompts, and display the final assets in a 3D viewer.
- Metrics - "Generated 200 texture variations in 12 minutes" is more memorable than "it was fast."
- Context - explain how the tool would fit into a real studio pipeline and what you'd improve with more time.
Upload your project to GitHub with a clear README, a requirements.txt, and example output images. Recruiters and leads will clone and try it - make the first-run experience effortless.
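A minimal requirements.txt for this project only needs the packages used above; pin the versions you've actually tested with (they're left unpinned here on purpose).
requests
Pillow
tqdm
numpy           # only if you add the normal-map / post-processing stages
opencv-python   # optional, for more advanced image processing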