# scraper/download_controller.py
from logbus.publisher import log
from scraper.tasks.pipeline import build_chapter_pipeline


class DownloadController:
    """Orchestrate the download of one book.

    Parses the book's metadata once, then queues an independent Celery
    pipeline (chain) for every chapter found.
    """

    def __init__(self, url: str):
        self.url = url
        self.scraper = None    # filled in by _init_scraper() when start() runs
        self.base_path = None  # derived from the scraper after metadata parse

    def start(self):
        """Parse book metadata and enqueue one chapter pipeline per chapter.

        Returns:
            dict: ``{"status": "queued", "chapters": <number of chapters>}``.
        """
        log(f"[DL-CONTROLLER] Parsing metadata for {self.url}")

        # 1) Gather book info
        scraper = self.scraper = self._init_scraper()
        scraper.parse_book_info()

        # Resolve the base path for downloaded chapter files
        self.base_path = scraper.get_base_path()

        # 2) Fetch the chapter list
        chapters = scraper.get_chapter_list()

        # 3) Kick off a Celery pipeline for each chapter
        for ch in chapters:
            log(f"[DL-CONTROLLER] Queue pipeline for chapter {ch.number}")
            workflow = build_chapter_pipeline(
                chapter_number=ch.number,
                chapter_url=ch.url,
                base_path=self.base_path,
            )
            workflow.delay()  # 🔥 this starts the chain

        return {"status": "queued", "chapters": len(chapters)}