# scraper/tasks/pipeline.py
from celery import chain

from scraper.tasks.download_tasks import download_chapter
from scraper.tasks.parse_tasks import parse_chapter
from scraper.tasks.save_tasks import save_chapter


def build_chapter_pipeline(
    chapter_number: int, chapter_url: str, base_path: str, meta: dict
):
    """Build a download → parse → save pipeline for one chapter.

    Args:
        chapter_number: Ordinal of the chapter being processed.
        chapter_url: URL the download task will fetch.
        base_path: Destination path handed to the save task.
        meta: Book metadata; contains: title, author, description.

    Returns:
        A Celery ``chain`` wiring download_chapter → parse_chapter →
        save_chapter, with each task's result fed into the next.
    """
    steps = (
        download_chapter.s(chapter_number, chapter_url),
        parse_chapter.s(meta),  # pass the metadata along to the parser
        save_chapter.s(base_path),
    )
    return chain(*steps)