Coverage for backend/app/job_email_scraping/routers/job_email.py: 97%
35 statements
« prev ^ index » next coverage.py v7.13.5, created at 2026-03-17 21:34 +0000
1"""FastAPI routers for the job email scraping service endpoints.
3Provides REST API endpoints for managing job alert emails, scraped job postings,
4and service execution logs with CRUD operations and admin access controls."""
6from typing import Literal
8from fastapi import Depends, HTTPException
9from fastapi.routing import APIRouter
10from sqlalchemy import asc, desc, or_
11from sqlalchemy.orm import Session
12from starlette import status
14from app import models
15from app.core.oauth2 import get_current_user
16from app.database import get_db
17from app.job_email_scraping import schemas
# Router grouping all job-alert-email endpoints under the /job-alert-emails prefix.
job_alert_email_router = APIRouter(prefix="/job-alert-emails", tags=["job-alert-emails"])
@job_alert_email_router.get("/paged", response_model=schemas.PaginatedJobEmailResponse)
def get_job_emails_paged(
    db: Session = Depends(get_db),
    current_user: models.User = Depends(get_current_user),
    page: int = 0,
    page_size: int = 10,
    sort_by: str = "date_received",
    sort_direction: Literal["asc", "desc"] = "desc",
    search: str | None = None,
) -> dict:
    """Retrieve a page of the current user's job alert emails.

    :param db: Database session
    :param current_user: Current authenticated user; results are scoped to them
    :param page: Zero-based page number (must be >= 0)
    :param page_size: Number of items per page (must be >= 1)
    :param sort_by: Name of a JobEmail attribute to sort on; falls back to
        ``date_received`` descending when the attribute does not exist
    :param sort_direction: Sort direction, "asc" or "desc"
    :param search: Optional case-insensitive substring matched against
        subject, sender, platform and alert_name
    :return: Dict with items, total, total_filtered, page, page_size,
        total_pages
    :raises HTTPException: 422 when page or page_size is out of range
    """
    # Guard against ZeroDivisionError in the total_pages ceiling division
    # (page_size == 0) and against a negative OFFSET (page < 0), which would
    # otherwise surface as a 500.
    if page < 0 or page_size < 1:
        raise HTTPException(
            status_code=status.HTTP_422_UNPROCESSABLE_ENTITY,
            detail="page must be >= 0 and page_size must be >= 1",
        )

    query = db.query(models.JobEmail).filter(models.JobEmail.owner_id == current_user.id)
    # Unfiltered total for this user, counted before the search filter applies.
    total = query.count()

    if search:
        search_term = f"%{search}%"
        query = query.filter(
            or_(
                models.JobEmail.subject.ilike(search_term),
                models.JobEmail.sender.ilike(search_term),
                models.JobEmail.platform.ilike(search_term),
                models.JobEmail.alert_name.ilike(search_term),
            )
        )

    # NOTE(review): hasattr() accepts any attribute of the model, not only
    # mapped columns — a non-column sort_by would fail inside order_by().
    # Confirm callers only pass column names.
    if hasattr(models.JobEmail, sort_by):
        sort_column = getattr(models.JobEmail, sort_by)
        if sort_direction == "desc":
            query = query.order_by(desc(sort_column).nulls_last())
        else:
            query = query.order_by(asc(sort_column).nulls_last())
    else:
        query = query.order_by(desc(models.JobEmail.date_received).nulls_last())

    total_filtered = query.count()
    offset = page * page_size
    # Ceiling division; report at least one (empty) page when nothing matches.
    total_pages = (total_filtered + page_size - 1) // page_size if total_filtered > 0 else 1
    results = query.offset(offset).limit(page_size).all()

    return {
        "items": results,
        "total": total,
        "total_filtered": total_filtered,
        "page": page,
        "page_size": page_size,
        "total_pages": total_pages,
    }
@job_alert_email_router.get("/by-scraped-job/{job_id}", response_model=list[schemas.JobEmailOut])
def get_job_emails_by_scraped_job(
    job_id: int,
    current_user: models.User = Depends(get_current_user),
    db: Session = Depends(get_db),
):
    """Return the job emails linked to one of the current user's scraped jobs.

    :param job_id: ID of the scraped job
    :param current_user: Current authenticated user; only their jobs are visible
    :param db: Database session
    :return: List of job emails linked to the scraped job
    :raises HTTPException: 404 when the job does not exist or is owned by
        another user
    """
    scraped_job = (
        db.query(models.ScrapedJob)
        .filter(
            models.ScrapedJob.id == job_id,
            models.ScrapedJob.owner_id == current_user.id,
        )
        .first()
    )
    if scraped_job is None:
        raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="Scraped job not found")
    return scraped_job.emails