Update GPT usage; add PDF report generation

Даниил Ивлев 2025-09-09 20:26:14 +05:00
parent e9a70cf393
commit 954fa2bc50
22 changed files with 1458 additions and 592 deletions

View File

@ -18,7 +18,7 @@ if os.name == "nt": # Windows
from livekit.agents import Agent, AgentSession, JobContext, WorkerOptions, cli
from livekit.api import DeleteRoomRequest, LiveKitAPI
from livekit.plugins import cartesia, deepgram, openai, silero
from livekit.plugins import openai, silero
from app.core.database import get_session
from app.repositories.interview_repository import InterviewRepository
@ -157,37 +157,38 @@ class InterviewAgent:
"part": "Частичная занятость",
"project": "Проектная работа",
"volunteer": "Волонтёрство",
"probation": "Стажировка"
"probation": "Стажировка",
}
experience_map = {
"noExperience": "Без опыта",
"between1And3": "1-3 года",
"between3And6": "3-6 лет",
"moreThan6": "Более 6 лет"
"moreThan6": "Более 6 лет",
}
schedule_map = {
"fullDay": "Полный день",
"shift": "Сменный график",
"flexible": "Гибкий график",
"remote": "Удалённая работа",
"flyInFlyOut": "Вахтовый метод"
"flyInFlyOut": "Вахтовый метод",
}
vacancy_info = f"""
ИНФОРМАЦИЯ О ВАКАНСИИ:
- Должность: {self.vacancy_data.get('title', 'Не указана')}
- Описание: {self.vacancy_data.get('description', 'Не указано')}
- Ключевые навыки: {self.vacancy_data.get('key_skills') or 'Не указаны'}
- Тип занятости: {employment_type_map.get(self.vacancy_data.get('employment_type'), self.vacancy_data.get('employment_type', 'Не указан'))}
- Опыт работы: {experience_map.get(self.vacancy_data.get('experience'), self.vacancy_data.get('experience', 'Не указан'))}
- График работы: {schedule_map.get(self.vacancy_data.get('schedule'), self.vacancy_data.get('schedule', 'Не указан'))}
- Регион: {self.vacancy_data.get('area_name', 'Не указан')}
- Профессиональные роли: {self.vacancy_data.get('professional_roles') or 'Не указаны'}
- Контактное лицо: {self.vacancy_data.get('contacts_name') or 'Не указано'}"""
- Должность: {self.vacancy_data.get("title", "Не указана")}
- Описание: {self.vacancy_data.get("description", "Не указано")}
- Ключевые навыки: {self.vacancy_data.get("key_skills") or "Не указаны"}
- Тип занятости: {employment_type_map.get(self.vacancy_data.get("employment_type"), self.vacancy_data.get("employment_type", "Не указан"))}
- Опыт работы: {experience_map.get(self.vacancy_data.get("experience"), self.vacancy_data.get("experience", "Не указан"))}
- График работы: {schedule_map.get(self.vacancy_data.get("schedule"), self.vacancy_data.get("schedule", "Не указан"))}
- Регион: {self.vacancy_data.get("area_name", "Не указан")}
- Профессиональные роли: {self.vacancy_data.get("professional_roles") or "Не указаны"}
- Контактное лицо: {self.vacancy_data.get("contacts_name") or "Не указано"}"""
return f"""
Ты опытный HR-интервьюер Стефани, который проводит адаптивное голосовое собеседование. Представься контактным именем из вакансии (если оно есть)
Ты опытный HR-интервьюер Стефани, которая проводит адаптивное голосовое собеседование. Представься как Стефани.
Разговаривай только на русском языке.
ИНФОРМАЦИЯ О ВАКАНСИИ:
@ -197,6 +198,7 @@ class InterviewAgent:
- Имя: {candidate_name}
- Опыт работы: {candidate_years} лет
- Ключевые навыки: {candidate_skills}
Из имени определи пол кандидата и обращайся к нему с учётом пола
ЦЕЛЬ ИНТЕРВЬЮ:
@ -213,7 +215,7 @@ class InterviewAgent:
- Способность учиться и адаптироваться.
- Совпадение ценностей и принципов с командой и компанией.
ПЛАН ИНТЕРВЬЮ (как руководство, адаптируйся по ситуации)
ПЛАН ИНТЕРВЬЮ (имей его в виду, но адаптируйся под ситуацию: либо углубляйся в детали, либо переходи к следующему вопросу)
{sections_info}
@ -227,9 +229,8 @@ class InterviewAgent:
Проблемные / кейсы (20%) проверить мышление и подход к решению.
Пример: "У нас есть система, которая падает раз в неделю. Как бы ты подошёл к диагностике проблемы?"
Задавай вопросы кратко и понятно. Не вываливай кучу информации на человека.
Задавай вопросы кратко и понятно (максимум тремя предложениями). Не вываливай кучу информации на кандидата.
Не перечисляй кандидату все пункты и вопросы из секции. Предлагай один общий вопрос или задавай уточняющие по очереди.
Ты должна спрашивать вопросы максимум в 3 предложения
ВРЕМЯ ИНТЕРВЬЮ:
- Запланированная длительность: {self.duration_minutes} минут
@ -251,7 +252,7 @@ class InterviewAgent:
ИНСТРУКЦИИ:
1. Начни с приветствия: {greeting}
2. Адаптируй вопросы под ответы кандидата
3. Не повторяй то, что клиент тебе сказал, лучше показывай, что понял, услышал и иди дальше. Лишний раз его не хвали
3. Не повторяй то, что кандидат тебе сказал, лучше показывай, что поняла и услышала, и иди дальше. Лишний раз его не хвали
3. Следи за временем - при превышении 80% времени начинай завершать интервью
4. Оценивай качество и глубину ответов кандидата
5. Если получаешь сообщение "[СИСТЕМА] Клиент молчит..." - это означает проблемы со связью или кандидат растерялся. Скажи что-то вроде "Приём! Ты меня слышишь?" или "Всё в порядке? Связь не пропала?"
@ -282,7 +283,6 @@ class InterviewAgent:
def get_time_info(self) -> dict[str, float]:
"""Получает информацию о времени интервью"""
import time
if self.interview_start_time is None:
# Интервью еще не началось
@ -294,7 +294,9 @@ class InterviewAgent:
current_time = self.interview_end_time or time.time()
elapsed_minutes = (current_time - self.interview_start_time) / 60
remaining_minutes = max(0.0, self.duration_minutes - elapsed_minutes)
time_percentage = min(100.0, (elapsed_minutes / self.duration_minutes) * 100)
time_percentage = min(
100.0, (elapsed_minutes / self.duration_minutes) * 100
)
return {
"elapsed_minutes": elapsed_minutes,
@ -366,7 +368,9 @@ async def entrypoint(ctx: JobContext):
session_id = metadata.get("session_id", session_id)
logger.info(f"[INIT] Loaded interview plan for session {session_id}")
if vacancy_data:
logger.info(f"[INIT] Loaded vacancy data from metadata: {vacancy_data.get('title', 'Unknown')}")
logger.info(
f"[INIT] Loaded vacancy data from metadata: {vacancy_data.get('title', 'Unknown')}"
)
except Exception as e:
logger.warning(f"[INIT] Failed to load metadata: {str(e)}")
interview_plan = {}
@ -409,31 +413,13 @@ async def entrypoint(ctx: JobContext):
)
# STT
stt = (
openai.STT(
model="whisper-1", language="ru", api_key=settings.openai_api_key
)
if settings.openai_api_key
else openai.STT(
model="whisper-1", language="ru", api_key=settings.openai_api_key
)
)
stt = openai.STT(model="whisper-1", language="ru", api_key=settings.openai_api_key)
# LLM
llm = openai.LLM(
model="gpt-5-mini", api_key=settings.openai_api_key
)
llm = openai.LLM(model="gpt-5-mini", api_key=settings.openai_api_key)
# TTS
tts = (
openai.TTS(
model="tts-1-hd",
api_key=settings.openai_api_key,
voice='nova'
)
if settings.openai_api_key
else silero.TTS(language="ru", model="v4_ru")
)
tts = openai.TTS(model="tts-1-hd", api_key=settings.openai_api_key, voice="nova")
# Создаем обычный Agent и Session
agent = Agent(instructions=interviewer.get_system_instructions())
@ -444,7 +430,7 @@ async def entrypoint(ctx: JobContext):
stt=stt,
llm=llm,
tts=tts,
user_away_timeout=7.0 # 7 секунд неактивности для срабатывания away
user_away_timeout=7.0, # 7 секунд неактивности для срабатывания away
)
# --- Сохранение диалога в БД ---
@ -482,16 +468,21 @@ async def entrypoint(ctx: JobContext):
interviewer_instance.interview_finalized = True
# Устанавливаем время завершения интервью
import time
interviewer_instance.interview_end_time = time.time()
if interviewer_instance.interview_start_time:
total_minutes = (interviewer_instance.interview_end_time - interviewer_instance.interview_start_time) / 60
total_minutes = (
interviewer_instance.interview_end_time
- interviewer_instance.interview_start_time
) / 60
logger.info(
f"[TIME] Interview ended at {time.strftime('%H:%M:%S')}, total duration: {total_minutes:.1f} min"
)
else:
logger.info(f"[TIME] Interview ended at {time.strftime('%H:%M:%S')} (no start time recorded)")
logger.info(
f"[TIME] Interview ended at {time.strftime('%H:%M:%S')} (no start time recorded)"
)
try:
logger.info(
@ -553,9 +544,7 @@ async def entrypoint(ctx: JobContext):
)
if not interviewer.interview_finalized:
await complete_interview_sequence(
ctx.room.name, interviewer
)
await complete_interview_sequence(ctx.room.name, interviewer)
break
return False
@ -616,12 +605,13 @@ async def entrypoint(ctx: JobContext):
"""Обработчик изменения состояния пользователя (активен/неактивен)"""
async def on_change():
logger.info(f"[USER_STATE] User state changed to: {event.new_state}")
# === Пользователь молчит более 10 секунд (state == away) ===
if event.new_state == "away" and interviewer.intro_done:
logger.info("[USER_STATE] User away detected, sending check-in message...")
logger.info(
"[USER_STATE] User away detected, sending check-in message..."
)
# сообщение — проверка связи
await session.generate_reply(
@ -683,11 +673,8 @@ async def entrypoint(ctx: JobContext):
logger.error(f"[SEQUENCE] Step 3: Failed to send release signal: {str(e)}")
logger.info("[SEQUENCE] Step 3: Continuing without release signal")
# --- Упрощенная логика обработки пользовательского ответа ---
async def handle_user_input(user_response: str):
current_section = interviewer.get_current_section()
# Сохраняем ответ пользователя
@ -707,6 +694,7 @@ async def entrypoint(ctx: JobContext):
interviewer.intro_done = True
# Устанавливаем время начала интервью при первом сообщении
import time
interviewer.interview_start_time = time.time()
logger.info(
f"[TIME] Interview started at {time.strftime('%H:%M:%S')}, duration: {interviewer.duration_minutes} min"
@ -734,7 +722,6 @@ async def entrypoint(ctx: JobContext):
if role == "user":
asyncio.create_task(handle_user_input(text))
elif role == "assistant":
# Сохраняем ответ агента в историю диалога
current_section = interviewer.get_current_section()
interviewer.conversation_history.append(

View File

@ -18,7 +18,11 @@ class S3Service:
self.bucket_name = settings.s3_bucket_name
async def upload_file(
self, file_content: bytes, file_name: str, content_type: str, public: bool = False
self,
file_content: bytes,
file_name: str,
content_type: str,
public: bool = False,
) -> str | None:
try:
file_key = f"{uuid.uuid4()}_{file_name}"
@ -35,7 +39,9 @@ class S3Service:
self.s3_client.put_object(**put_object_kwargs)
file_url = f"https://d8d88bee-afd2-4266-8332-538389e25f52.selstorage.ru/{file_key}"
file_url = (
f"https://d8d88bee-afd2-4266-8332-538389e25f52.selstorage.ru/{file_key}"
)
return file_url
except ClientError as e:

View File

@ -38,12 +38,17 @@ class InterviewSession(InterviewSessionBase, table=True):
id: int | None = Field(default=None, primary_key=True)
started_at: datetime = Field(default_factory=datetime.utcnow)
completed_at: datetime | None = None
interview_start_time: datetime | None = None
interview_end_time: datetime | None = None
# Связь с отчетом (один к одному)
report: Optional["InterviewReport"] = Relationship(
back_populates="interview_session"
)
# Связь с резюме
resume: Optional["Resume"] = Relationship()
class InterviewSessionCreate(SQLModel):
resume_id: int
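
The two new nullable datetime columns on InterviewSession need a schema migration to land. A minimal Alembic sketch under stated assumptions: Alembic is in use, the table uses SQLModel's default name "interviewsession", and the revision identifiers are placeholders.

import sqlalchemy as sa
from alembic import op

# Placeholder identifiers; wire these into the real revision chain.
revision = "add_interview_times"
down_revision = None


def upgrade() -> None:
    op.add_column("interviewsession", sa.Column("interview_start_time", sa.DateTime(), nullable=True))
    op.add_column("interviewsession", sa.Column("interview_end_time", sa.DateTime(), nullable=True))


def downgrade() -> None:
    op.drop_column("interviewsession", "interview_end_time")
    op.drop_column("interviewsession", "interview_start_time")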

View File

@ -6,8 +6,8 @@ from sqlalchemy import select, update
from sqlalchemy.ext.asyncio import AsyncSession
from app.core.database import get_session
from app.models.interview_report import InterviewReport
from app.models.interview import InterviewSession
from app.models.interview_report import InterviewReport
from app.models.resume import Resume
from app.models.vacancy import Vacancy
from app.repositories.base_repository import BaseRepository
@ -64,7 +64,10 @@ class InterviewReportRepository(BaseRepository[InterviewReport]):
"""Получить все отчёты по вакансии"""
statement = (
select(InterviewReport)
.join(InterviewSession, InterviewSession.id == InterviewReport.interview_session_id)
.join(
InterviewSession,
InterviewSession.id == InterviewReport.interview_session_id,
)
.join(Resume, Resume.id == InterviewSession.resume_id)
.join(Vacancy, Vacancy.id == Resume.vacancy_id)
.where(Vacancy.id == vacancy_id)

View File

@ -1,5 +1,4 @@
import json
import os
from datetime import UTC, datetime
from fastapi import APIRouter, Depends, HTTPException
@ -132,13 +131,13 @@ async def force_end_interview(session_id: int) -> dict:
if agent_status["status"] != "active":
raise HTTPException(
status_code=400,
detail=f"Agent is not active, current status: {agent_status['status']}"
detail=f"Agent is not active, current status: {agent_status['status']}",
)
if agent_status["session_id"] != session_id:
raise HTTPException(
status_code=400,
detail=f"Agent is not handling session {session_id}, current session: {agent_status['session_id']}"
detail=f"Agent is not handling session {session_id}, current session: {agent_status['session_id']}",
)
# Записываем команду завершения в файл команд
@ -147,7 +146,7 @@ async def force_end_interview(session_id: int) -> dict:
"action": "end_session",
"session_id": session_id,
"timestamp": datetime.now(UTC).isoformat(),
"initiated_by": "admin_api"
"initiated_by": "admin_api",
}
with open(command_file, "w", encoding="utf-8") as f:
@ -157,13 +156,12 @@ async def force_end_interview(session_id: int) -> dict:
"success": True,
"message": f"Force end command sent for session {session_id}",
"session_id": session_id,
"command_file": command_file
"command_file": command_file,
}
except HTTPException:
raise
except Exception as e:
raise HTTPException(
status_code=500,
detail=f"Failed to send force end command: {str(e)}"
status_code=500, detail=f"Failed to send force end command: {str(e)}"
)

View File

@ -4,7 +4,6 @@ from pydantic import BaseModel
from app.core.database import get_session
from app.repositories.resume_repository import ResumeRepository
from app.services.pdf_report_service import PDFReportService
from celery_worker.interview_analysis_task import (
analyze_multiple_candidates,
generate_interview_report,
@ -300,23 +299,23 @@ async def get_pdf_report(
return RedirectResponse(url=report.pdf_report_url, status_code=302)
@router.post("/generate-pdf/{resume_id}", response_model=PDFGenerationResponse)
@router.post("/generate-pdf/{resume_id}")
async def generate_pdf_report(
resume_id: int,
session=Depends(get_session),
resume_repo: ResumeRepository = Depends(ResumeRepository),
pdf_report_service: PDFReportService = Depends(PDFReportService),
):
"""
Генерирует PDF отчет по интервью
Запускает асинхронную генерацию PDF отчета по интервью
Проверяет наличие отчета в базе данных и генерирует PDF файл.
Проверяет наличие отчета в базе данных и запускает Celery задачу для генерации PDF файла.
Если PDF уже существует, возвращает существующий URL.
"""
from sqlmodel import select
from app.models.interview import InterviewSession
from app.models.interview_report import InterviewReport
from celery_worker.tasks import generate_pdf_report_task
# Проверяем, существует ли резюме
resume = await resume_repo.get_by_id(resume_id)
@ -346,16 +345,15 @@ async def generate_pdf_report(
# Если PDF уже существует, возвращаем его
if report.pdf_report_url:
return PDFGenerationResponse(
message="PDF report already exists",
resume_id=resume_id,
candidate_name=resume.applicant_name,
pdf_url=report.pdf_report_url,
status="exists",
)
return {
"message": "PDF report already exists",
"resume_id": resume_id,
"report_id": report.id,
"candidate_name": resume.applicant_name,
"pdf_url": report.pdf_report_url,
"status": "exists",
}
# Генерируем PDF отчет
try:
# Получаем позицию из связанной вакансии
from app.models.vacancy import Vacancy
@ -365,38 +363,114 @@ async def generate_pdf_report(
position = vacancy.title if vacancy else "Позиция не указана"
# Генерируем и загружаем PDF
pdf_url = await pdf_report_service.generate_and_upload_pdf(
report, resume.applicant_name, position
)
# Сериализуем данные отчета
report_data = {
"id": report.id,
"interview_session_id": report.interview_session_id,
"technical_skills_score": report.technical_skills_score,
"technical_skills_justification": report.technical_skills_justification,
"technical_skills_concerns": report.technical_skills_concerns,
"experience_relevance_score": report.experience_relevance_score,
"experience_relevance_justification": report.experience_relevance_justification,
"experience_relevance_concerns": report.experience_relevance_concerns,
"communication_score": report.communication_score,
"communication_justification": report.communication_justification,
"communication_concerns": report.communication_concerns,
"problem_solving_score": report.problem_solving_score,
"problem_solving_justification": report.problem_solving_justification,
"problem_solving_concerns": report.problem_solving_concerns,
"cultural_fit_score": report.cultural_fit_score,
"cultural_fit_justification": report.cultural_fit_justification,
"cultural_fit_concerns": report.cultural_fit_concerns,
"overall_score": report.overall_score,
"recommendation": report.recommendation,
"strengths": report.strengths,
"weaknesses": report.weaknesses,
"red_flags": report.red_flags,
"questions_quality_score": report.questions_quality_score,
"interview_duration_minutes": report.interview_duration_minutes,
"response_count": report.response_count,
"dialogue_messages_count": report.dialogue_messages_count,
"next_steps": report.next_steps,
"interviewer_notes": report.interviewer_notes,
"questions_analysis": report.questions_analysis,
"analysis_method": report.analysis_method,
"llm_model_used": report.llm_model_used,
"analysis_duration_seconds": report.analysis_duration_seconds,
"pdf_report_url": report.pdf_report_url,
"created_at": report.created_at.isoformat() if report.created_at else None,
"updated_at": report.updated_at.isoformat() if report.updated_at else None,
}
if not pdf_url:
raise HTTPException(
status_code=500, detail="Failed to generate or upload PDF report"
)
# Обновляем отчет в БД
from sqlmodel import update
stmt = (
update(InterviewReport)
.where(InterviewReport.id == report.id)
.values(pdf_report_url=pdf_url)
)
await session.execute(stmt)
await session.commit()
return PDFGenerationResponse(
message="PDF report generated successfully",
resume_id=resume_id,
# Запускаем Celery задачу для генерации PDF
task = generate_pdf_report_task.delay(
report_data=report_data,
candidate_name=resume.applicant_name,
pdf_url=pdf_url,
status="generated",
position=position,
resume_file_url=resume.resume_file_url,
)
return {
"message": "PDF generation started",
"resume_id": resume_id,
"report_id": report.id,
"candidate_name": resume.applicant_name,
"task_id": task.id,
"status": "in_progress",
}
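
generate_pdf_report_task itself is not shown in this diff, so here is a hedged sketch of one plausible implementation, consistent with the report_data payload built above and the result keys read by the status endpoint below; module paths, progress values, and the enum round-trip are assumptions.

import asyncio

from app.models.interview_report import InterviewReport, RecommendationType
from app.services.pdf_report_service import PDFReportService
from celery_worker.celery_app import celery_app


@celery_app.task(bind=True)
def generate_pdf_report_task(self, report_data: dict, candidate_name: str,
                             position: str, resume_file_url: str | None = None):
    """Render the PDF, upload it to S3 and report progress (sketch)."""
    self.update_state(state="PROGRESS", meta={"progress": 10, "status": "Rendering PDF..."})
    # Rebuild a detached report object from the serialized dict; the ISO
    # datetime strings are dropped since rendering does not need them.
    payload = {k: v for k, v in report_data.items() if k not in ("created_at", "updated_at")}
    if payload.get("recommendation"):
        payload["recommendation"] = RecommendationType(payload["recommendation"])
    report = InterviewReport(**payload)
    service = PDFReportService()
    pdf_url = asyncio.run(
        service.generate_and_upload_pdf(report, candidate_name, position, resume_file_url)
    )
    # Persisting pdf_report_url back to the DB would use the synchronous
    # repository added later in this commit (SyncInterviewReportRepository).
    return {"pdf_url": pdf_url, "interview_report_id": report_data["id"]}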
@router.get("/pdf-task-status/{task_id}")
async def get_pdf_task_status(task_id: str):
"""
Получить статус выполнения Celery задачи генерации PDF
"""
from celery_worker.celery_app import celery_app
try:
task_result = celery_app.AsyncResult(task_id)
if task_result.state == "PENDING":
return {
"task_id": task_id,
"status": "pending",
"message": "Task is waiting to be processed",
}
elif task_result.state == "PROGRESS":
return {
"task_id": task_id,
"status": "in_progress",
"progress": task_result.info.get("progress", 0),
"message": task_result.info.get("status", "Processing..."),
}
elif task_result.state == "SUCCESS":
result = task_result.result
return {
"task_id": task_id,
"status": "completed",
"progress": 100,
"message": "PDF generation completed successfully",
"pdf_url": result.get("pdf_url"),
"file_size": result.get("file_size"),
"report_id": result.get("interview_report_id"),
}
elif task_result.state == "FAILURE":
return {
"task_id": task_id,
"status": "failed",
"message": str(task_result.info),
"error": str(task_result.info),
}
else:
return {
"task_id": task_id,
"status": task_result.state.lower(),
"message": f"Task state: {task_result.state}",
}
except Exception as e:
raise HTTPException(
status_code=500, detail=f"Error generating PDF report: {str(e)}"
status_code=500, detail=f"Error checking task status: {str(e)}"
)
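
A minimal client-side sketch of the intended flow: start generation, then poll the status endpoint above until it settles. The base URL, the absent router prefix, and the use of requests are assumptions.

import time

import requests

BASE = "http://localhost:8000"  # assumed deployment URL, router prefix omitted

resp = requests.post(f"{BASE}/generate-pdf/42").json()
if resp["status"] == "exists":
    print("PDF already available:", resp["pdf_url"])
else:
    task_id = resp["task_id"]
    while True:
        status = requests.get(f"{BASE}/pdf-task-status/{task_id}").json()
        if status["status"] not in ("pending", "in_progress"):
            print(status)  # completed, failed, or an unexpected state
            break
        time.sleep(2)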

View File

@ -1,15 +1,15 @@
from fastapi import APIRouter, Depends, HTTPException, Request
from typing import List
from fastapi import APIRouter, Depends, HTTPException
from app.core.session_middleware import get_current_session
from app.models.session import Session
from app.models.interview_report import InterviewReport
from app.models.session import Session
from app.services.interview_reports_service import InterviewReportService
router = APIRouter(prefix="/interview-reports", tags=["interview-reports"])
@router.get("/vacancy/{vacancy_id}", response_model=List[InterviewReport])
@router.get("/vacancy/{vacancy_id}", response_model=list[InterviewReport])
async def get_reports_by_vacancy(
vacancy_id: int,
current_session: Session = Depends(get_current_session),

View File

@ -2,14 +2,15 @@ from fastapi import APIRouter, Depends, File, Form, HTTPException, Query, Upload
from pydantic import BaseModel
from app.models.vacancy import VacancyCreate, VacancyRead, VacancyUpdate
from app.services.vacancy_service import VacancyService
from app.services.vacancy_parser_service import vacancy_parser_service
from app.services.vacancy_service import VacancyService
router = APIRouter(prefix="/vacancies", tags=["vacancies"])
class VacancyParseResponse(BaseModel):
"""Ответ на запрос парсинга вакансии"""
message: str
parsed_data: dict | None = None
task_id: str | None = None
@ -110,26 +111,32 @@ async def parse_vacancy_from_file(
if not file.filename:
raise HTTPException(status_code=400, detail="Имя файла не указано")
file_extension = file.filename.lower().split('.')[-1]
supported_formats = ['pdf', 'docx', 'rtf', 'txt']
file_extension = file.filename.lower().split(".")[-1]
supported_formats = ["pdf", "docx", "rtf", "txt"]
if file_extension not in supported_formats:
raise HTTPException(
status_code=400,
detail=f"Неподдерживаемый формат файла. Поддерживаются: {', '.join(supported_formats)}"
detail=f"Неподдерживаемый формат файла. Поддерживаются: {', '.join(supported_formats)}",
)
# Проверяем размер файла (максимум 10MB)
file_content = await file.read()
if len(file_content) > 10 * 1024 * 1024:
raise HTTPException(status_code=400, detail="Файл слишком большой (максимум 10MB)")
raise HTTPException(
status_code=400, detail="Файл слишком большой (максимум 10MB)"
)
try:
# Извлекаем текст из файла
raw_text = vacancy_parser_service.extract_text_from_file(file_content, file.filename)
raw_text = vacancy_parser_service.extract_text_from_file(
file_content, file.filename
)
if not raw_text.strip():
raise HTTPException(status_code=400, detail="Не удалось извлечь текст из файла")
raise HTTPException(
status_code=400, detail="Не удалось извлечь текст из файла"
)
# Парсим с помощью AI
parsed_data = await vacancy_parser_service.parse_vacancy_with_ai(raw_text)
@ -144,22 +151,21 @@ async def parse_vacancy_from_file(
# Возвращаем парсинг, но предупреждаем об ошибке создания
return VacancyParseResponse(
message=f"Парсинг выполнен, но ошибка при создании вакансии: {str(e)}",
parsed_data=parsed_data
parsed_data=parsed_data,
)
response_message = "Парсинг выполнен успешно"
if created_vacancy:
response_message += f". Вакансия создана с ID: {created_vacancy.id}"
return VacancyParseResponse(
message=response_message,
parsed_data=parsed_data
)
return VacancyParseResponse(message=response_message, parsed_data=parsed_data)
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=f"Ошибка при парсинге вакансии: {str(e)}")
raise HTTPException(
status_code=500, detail=f"Ошибка при парсинге вакансии: {str(e)}"
)
@router.post("/parse-text", response_model=VacancyParseResponse)
@ -180,10 +186,14 @@ async def parse_vacancy_from_text(
"""
if not text.strip():
raise HTTPException(status_code=400, detail="Текст вакансии не может быть пустым")
raise HTTPException(
status_code=400, detail="Текст вакансии не может быть пустым"
)
if len(text) > 50000: # Ограничение на длину текста
raise HTTPException(status_code=400, detail="Текст слишком длинный (максимум 50000 символов)")
raise HTTPException(
status_code=400, detail="Текст слишком длинный (максимум 50000 символов)"
)
try:
# Парсим с помощью AI
@ -198,22 +208,21 @@ async def parse_vacancy_from_text(
except Exception as e:
return VacancyParseResponse(
message=f"Парсинг выполнен, но ошибка при создании вакансии: {str(e)}",
parsed_data=parsed_data
parsed_data=parsed_data,
)
response_message = "Парсинг выполнен успешно"
if created_vacancy:
response_message += f". Вакансия создана с ID: {created_vacancy.id}"
return VacancyParseResponse(
message=response_message,
parsed_data=parsed_data
)
return VacancyParseResponse(message=response_message, parsed_data=parsed_data)
except ValueError as e:
raise HTTPException(status_code=400, detail=str(e))
except Exception as e:
raise HTTPException(status_code=500, detail=f"Ошибка при парсинге вакансии: {str(e)}")
raise HTTPException(
status_code=500, detail=f"Ошибка при парсинге вакансии: {str(e)}"
)
@router.get("/parse-formats")
@ -226,33 +235,21 @@ async def get_supported_formats():
"""
return {
"supported_formats": [
{
"extension": "pdf",
"description": "PDF документы",
"max_size_mb": 10
},
{"extension": "pdf", "description": "PDF документы", "max_size_mb": 10},
{
"extension": "docx",
"description": "Microsoft Word документы",
"max_size_mb": 10
"max_size_mb": 10,
},
{
"extension": "rtf",
"description": "Rich Text Format",
"max_size_mb": 10
},
{
"extension": "txt",
"description": "Текстовые файлы",
"max_size_mb": 10
}
{"extension": "rtf", "description": "Rich Text Format", "max_size_mb": 10},
{"extension": "txt", "description": "Текстовые файлы", "max_size_mb": 10},
],
"features": [
"Автоматическое извлечение текста из файлов",
"AI-парсинг структурированной информации",
"Создание вакансии в базе данных",
"Валидация данных"
]
"Валидация данных",
],
}
@ -272,49 +269,54 @@ async def parse_vacancy_from_file_async(
dict: ID задачи для отслеживания статуса
"""
import base64
from celery_worker.tasks import parse_vacancy_task
# Проверяем формат файла
if not file.filename:
raise HTTPException(status_code=400, detail="Имя файла не указано")
file_extension = file.filename.lower().split('.')[-1]
supported_formats = ['pdf', 'docx', 'rtf', 'txt']
file_extension = file.filename.lower().split(".")[-1]
supported_formats = ["pdf", "docx", "rtf", "txt"]
if file_extension not in supported_formats:
raise HTTPException(
status_code=400,
detail=f"Неподдерживаемый формат файла. Поддерживаются: {', '.join(supported_formats)}"
detail=f"Неподдерживаемый формат файла. Поддерживаются: {', '.join(supported_formats)}",
)
# Проверяем размер файла (максимум 10MB)
file_content = await file.read()
if len(file_content) > 10 * 1024 * 1024:
raise HTTPException(status_code=400, detail="Файл слишком большой (максимум 10MB)")
raise HTTPException(
status_code=400, detail="Файл слишком большой (максимум 10MB)"
)
try:
# Кодируем содержимое файла в base64 для передачи в Celery
file_content_base64 = base64.b64encode(file_content).decode('utf-8')
file_content_base64 = base64.b64encode(file_content).decode("utf-8")
# Конвертируем строку в boolean
create_vacancy_bool = create_vacancy.lower() in ('true', '1', 'yes', 'on')
create_vacancy_bool = create_vacancy.lower() in ("true", "1", "yes", "on")
# Запускаем асинхронную задачу
task = parse_vacancy_task.delay(
file_content_base64=file_content_base64,
filename=file.filename,
create_vacancy=create_vacancy_bool
create_vacancy=create_vacancy_bool,
)
return {
"message": "Задача парсинга запущена",
"task_id": task.id,
"status": "pending",
"check_status_url": f"/api/v1/vacancies/parse-status/{task.id}"
"check_status_url": f"/api/v1/vacancies/parse-status/{task.id}",
}
except Exception as e:
raise HTTPException(status_code=500, detail=f"Ошибка при запуске парсинга: {str(e)}")
raise HTTPException(
status_code=500, detail=f"Ошибка при запуске парсинга: {str(e)}"
)
@router.get("/parse-status/{task_id}")
@ -333,38 +335,40 @@ async def get_parse_status(task_id: str):
try:
task = celery_app.AsyncResult(task_id)
if task.state == 'PENDING':
if task.state == "PENDING":
response = {
'task_id': task_id,
'state': task.state,
'status': 'Задача ожидает выполнения...',
'progress': 0
"task_id": task_id,
"state": task.state,
"status": "Задача ожидает выполнения...",
"progress": 0,
}
elif task.state == 'PROGRESS':
elif task.state == "PROGRESS":
response = {
'task_id': task_id,
'state': task.state,
'status': task.info.get('status', ''),
'progress': task.info.get('progress', 0)
"task_id": task_id,
"state": task.state,
"status": task.info.get("status", ""),
"progress": task.info.get("progress", 0),
}
elif task.state == 'SUCCESS':
elif task.state == "SUCCESS":
response = {
'task_id': task_id,
'state': task.state,
'status': 'completed',
'progress': 100,
'result': task.result
"task_id": task_id,
"state": task.state,
"status": "completed",
"progress": 100,
"result": task.result,
}
else: # FAILURE
response = {
'task_id': task_id,
'state': task.state,
'status': 'failed',
'progress': 0,
'error': str(task.info)
"task_id": task_id,
"state": task.state,
"status": "failed",
"progress": 0,
"error": str(task.info),
}
return response
except Exception as e:
raise HTTPException(status_code=500, detail=f"Ошибка при получении статуса задачи: {str(e)}")
raise HTTPException(
status_code=500, detail=f"Ошибка при получении статуса задачи: {str(e)}"
)
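
For completeness, a client sketch of the asynchronous parse flow: upload a file, then poll the check_status_url returned above. The host, the file name, and the exact path of the async upload route are assumptions.

import time

import requests

BASE = "http://localhost:8000"  # assumed

with open("vacancy.pdf", "rb") as f:  # hypothetical input file
    start = requests.post(
        f"{BASE}/api/v1/vacancies/parse-file-async",  # assumed route for the async endpoint
        files={"file": ("vacancy.pdf", f, "application/pdf")},
        data={"create_vacancy": "true"},
    ).json()

while True:
    state = requests.get(BASE + start["check_status_url"]).json()
    if state["state"] not in ("PENDING", "PROGRESS"):
        print(state)
        break
    time.sleep(2)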

View File

@ -99,7 +99,9 @@ class AgentManager:
# Запускаем мониторинг команд
if not self._monitoring_task:
self._monitoring_task = asyncio.create_task(self._monitor_commands())
self._monitoring_task = asyncio.create_task(
self._monitor_commands()
)
return True
@ -259,7 +261,9 @@ class AgentManager:
"""Обрабатывает сигнал о завершении сессии от агента"""
async with self._lock:
if not self._agent_process:
logger.warning(f"No agent process to handle session_completed for {session_id}")
logger.warning(
f"No agent process to handle session_completed for {session_id}"
)
return False
if self._agent_process.session_id != session_id:
@ -281,7 +285,9 @@ class AgentManager:
self._agent_process.room_name = None
self._agent_process.status = "idle"
logger.info(f"Agent automatically released from session {old_session_id}")
logger.info(
f"Agent automatically released from session {old_session_id}"
)
return True
except Exception as e:
@ -353,20 +359,27 @@ class AgentManager:
while True:
try:
if os.path.exists(command_file):
with open(command_file, "r", encoding="utf-8") as f:
with open(command_file, encoding="utf-8") as f:
command = json.load(f)
# Проверяем timestamp чтобы избежать повторной обработки
command_timestamp = command.get("timestamp")
if command_timestamp and command_timestamp != last_processed_timestamp:
if (
command_timestamp
and command_timestamp != last_processed_timestamp
):
action = command.get("action")
if action == "session_completed":
session_id = command.get("session_id")
room_name = command.get("room_name")
logger.info(f"[MONITOR] Processing session_completed for {session_id}")
await self.handle_session_completed(session_id, room_name)
logger.info(
f"[MONITOR] Processing session_completed for {session_id}"
)
await self.handle_session_completed(
session_id, room_name
)
last_processed_timestamp = command_timestamp
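
The producer side of this file-based protocol does not appear in this diff; a hypothetical sketch of how the agent process might emit a session_completed command, with the command-file path and values assumed:

import json
from datetime import UTC, datetime

COMMAND_FILE = "agent_commands.json"  # assumed path, must match the monitor's

command = {
    "action": "session_completed",
    "session_id": 123,
    "room_name": "interview-123",
    # A fresh timestamp lets the monitor loop above skip duplicates.
    "timestamp": datetime.now(UTC).isoformat(),
}
with open(COMMAND_FILE, "w", encoding="utf-8") as f:
    json.dump(command, f, ensure_ascii=False)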

View File

@ -10,7 +10,9 @@ from app.repositories.interview_reports_repository import InterviewReportReposit
class InterviewReportService:
def __init__(
self,
report_repo: Annotated[InterviewReportRepository, Depends(InterviewReportRepository)],
report_repo: Annotated[
InterviewReportRepository, Depends(InterviewReportRepository)
],
):
self.report_repo = report_repo
@ -22,9 +24,7 @@ class InterviewReportService:
"""Получить все отчёты по вакансии"""
return await self.report_repo.get_by_vacancy_id(vacancy_id)
async def update_report_scores(
self, report_id: int, scores: dict
) -> bool:
async def update_report_scores(self, report_id: int, scores: dict) -> bool:
"""
Обновить оценки отчёта.
Пример scores:

View File

@ -1,9 +1,13 @@
import io
import os
import shutil
import tempfile
from datetime import datetime
from urllib.parse import quote
import requests
from jinja2 import Template
import pdfkit
from playwright.async_api import async_playwright
from app.core.s3 import s3_service
from app.models.interview_report import InterviewReport, RecommendationType
@ -14,11 +18,149 @@ class PDFReportService:
def __init__(self):
self.template_path = "templates/interview_report.html"
self._setup_fonts()
def _download_font(self, url: str, dest_path: str) -> str:
"""Скачивает шрифт по URL в dest_path (перезаписывает если нужно)."""
os.makedirs(os.path.dirname(dest_path), exist_ok=True)
try:
resp = requests.get(url, stream=True, timeout=15)
resp.raise_for_status()
with open(dest_path, "wb") as f:
shutil.copyfileobj(resp.raw, f)
print(f"[OK] Downloaded font {url} -> {dest_path}")
return dest_path
except Exception as e:
print(f"[ERROR] Failed to download font {url}: {e}")
raise
def _register_local_fonts(self, regular_path: str, bold_path: str):
"""Регистрирует шрифты в ReportLab, чтобы xhtml2pdf мог ими пользоваться."""
try:
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
pdfmetrics.registerFont(TTFont("DejaVuSans", regular_path))
pdfmetrics.registerFont(TTFont("DejaVuSans-Bold", bold_path))
# mapping: family, bold(1)/normal(0), italic(1)/normal(0), fontkey
addMapping("DejaVuSans", 0, 0, "DejaVuSans")
addMapping("DejaVuSans", 1, 0, "DejaVuSans-Bold")
self.available_fonts = ["DejaVuSans", "DejaVuSans-Bold"]
print("[OK] Registered DejaVu fonts in ReportLab")
except Exception as e:
print(f"[ERROR] Register fonts failed: {e}")
self.available_fonts = []
def _setup_fonts(self):
"""Настройка русских шрифтов для xhtml2pdf"""
self.available_fonts = []
try:
from reportlab.lib.fonts import addMapping
from reportlab.pdfbase import pdfmetrics
from reportlab.pdfbase.ttfonts import TTFont
# Используем скачанные DejaVu шрифты
fonts_dir = "static/fonts"
font_paths = [
(os.path.join(fonts_dir, "DejaVuSans.ttf"), "DejaVu", False, False),
(os.path.join(fonts_dir, "DejaVuSans-Bold.ttf"), "DejaVu", True, False),
]
for font_path, font_name, is_bold, is_italic in font_paths:
if os.path.exists(font_path):
try:
font_key = f"{font_name}"
if is_bold:
font_key += "-Bold"
if is_italic:
font_key += "-Italic"
# Проверяем, что шрифт можно загрузить
test_font = TTFont(font_key, font_path)
pdfmetrics.registerFont(test_font)
addMapping(font_name, is_bold, is_italic, font_key)
self.available_fonts.append(font_key)
print(f"[OK] Successfully registered font: {font_key}")
except Exception as e:
print(f"[ERROR] Failed to register font {font_path}: {e}")
else:
print(f"[ERROR] Font file not found: {font_path}")
except Exception as e:
print(f"[ERROR] Font setup failed: {e}")
print(f"Available fonts: {self.available_fonts}")
def _get_font_css(self) -> str:
"""Возвращает CSS с подключением локальных шрифтов (скачивает при необходимости)."""
# локальные пути к шрифтам
fonts_dir = os.path.abspath("static/fonts").replace("\\", "/")
regular_local = os.path.join(fonts_dir, "DejaVuSans.ttf").replace("\\", "/")
bold_local = os.path.join(fonts_dir, "DejaVuSans-Bold.ttf").replace("\\", "/")
# удалённые URL шрифтов (при необходимости замени на свои)
remote_regular = (
"https://d8d88bee-afd2-4266-8332-538389e25f52.selstorage.ru/DejaVuSans.ttf"
)
remote_bold = "https://d8d88bee-afd2-4266-8332-538389e25f52.selstorage.ru/DejaVuSans-Bold.ttf"
# скачиваем если локально нет
try:
if not os.path.exists(regular_local) or os.path.getsize(regular_local) == 0:
self._download_font(remote_regular, regular_local)
if not os.path.exists(bold_local) or os.path.getsize(bold_local) == 0:
self._download_font(remote_bold, bold_local)
except Exception as e:
print("[WARNING] Failed to ensure local fonts:", e)
# регистрируем в ReportLab (чтобы гарантировать поддержку кириллицы)
try:
self._register_local_fonts(regular_local, bold_local)
except Exception as e:
print("[WARNING] Font registration error:", e)
# используем file:/// абсолютный путь в src и УБИРАЕМ format('...') — это важно
# url-энкодим путь на случай пробелов
reg_quoted = quote(regular_local)
bold_quoted = quote(bold_local)
font_css = f"""
<style>
@font-face {{
font-family: 'DejaVuSans';
src: url('file:///{reg_quoted}');
font-weight: normal;
font-style: normal;
}}
@font-face {{
font-family: 'DejaVuSans';
src: url('file:///{bold_quoted}');
font-weight: bold;
font-style: normal;
}}
/* Применяем семейство без !important, чтобы не ломать шаблон */
body, * {{
font-family: 'DejaVuSans', Arial, sans-serif;
}}
@page {{
size: A4;
margin: 0.75in;
}}
</style>
"""
return font_css
def _load_html_template(self) -> str:
"""Загружает HTML шаблон из файла"""
try:
with open(self.template_path, 'r', encoding='utf-8') as file:
with open(self.template_path, encoding="utf-8") as file:
return file.read()
except FileNotFoundError:
raise FileNotFoundError(f"HTML шаблон не найден: {self.template_path}")
@ -35,25 +177,74 @@ class PDFReportService:
else:
return str(concerns)
def _format_list_field(self, field_value) -> str:
"""Форматирует поле со списком для отображения"""
if not field_value:
return "Не указаны"
if isinstance(field_value, list):
return "\n".join([f"{item}" for item in field_value])
elif isinstance(field_value, str):
return field_value
else:
return str(field_value)
def _get_score_class(self, score: int) -> str:
"""Возвращает CSS класс для цвета оценки"""
if score >= 80:
return "score-green"
if score >= 90:
return "score-green" # STRONGLY_RECOMMEND
elif score >= 75:
return "score-light-green" # RECOMMEND
elif score >= 60:
return "score-orange"
return "score-orange" # CONSIDER
else:
return "score-red"
return "score-red" # REJECT
def _format_recommendation(self, recommendation: RecommendationType) -> tuple:
"""Форматирует рекомендацию для отображения"""
if recommendation == RecommendationType.HIRE:
if recommendation == RecommendationType.STRONGLY_RECOMMEND:
return ("Настоятельно рекомендуем", "recommend-button")
elif recommendation == RecommendationType.RECOMMEND:
return ("Рекомендуем", "recommend-button")
elif recommendation == RecommendationType.CONSIDER:
return ("К рассмотрению", "consider-button")
else:
else: # REJECT
return ("Не рекомендуем", "reject-button")
def generate_pdf_report(self, interview_report: InterviewReport) -> bytes:
def link_callback(self, uri, rel):
"""Скачивает удалённый ресурс в temp файл и возвращает путь (для xhtml2pdf)."""
# remote -> сохранить во временный файл и вернуть путь
if uri.startswith("http://") or uri.startswith("https://"):
try:
r = requests.get(uri, stream=True, timeout=15)
r.raise_for_status()
fd, tmp_path = tempfile.mkstemp(suffix=os.path.basename(uri))
with os.fdopen(fd, "wb") as f:
for chunk in r.iter_content(8192):
f.write(chunk)
return tmp_path
except Exception as e:
raise Exception(f"Не удалось скачать ресурс {uri}: {e}")
# file:///path -> без префикса
if uri.startswith("file:///"):
return uri[7:]
# локальные относительные пути
if os.path.isfile(uri):
return uri
# fallback — возвращаем как есть (pisa попробует обработать)
return uri
def fetch_resources(self, uri, rel):
# Разрешаем xhtml2pdf скачивать https
return self.link_callback(uri, rel)
async def generate_pdf_report(
self,
interview_report: InterviewReport,
candidate_name: str = None,
position: str = None,
resume_file_url: str = None,
) -> bytes:
"""
Генерирует PDF отчет на основе HTML шаблона
@ -68,119 +259,164 @@ class PDFReportService:
html_template = self._load_html_template()
# Подготавливаем данные для шаблона
template_data = self._prepare_template_data(interview_report)
template_data = self._prepare_template_data(
interview_report,
candidate_name or "Не указано",
position or "Не указана",
resume_file_url,
)
# Рендерим HTML с данными
template = Template(html_template)
rendered_html = template.render(**template_data)
# Настройки для wkhtmltopdf
options = {
'page-size': 'A4',
'margin-top': '0.75in',
'margin-right': '0.75in',
'margin-bottom': '0.75in',
'margin-left': '0.75in',
'encoding': 'UTF-8',
'no-outline': None,
'enable-local-file-access': None
}
# Получаем CSS с проверенными шрифтами
font_css = self._get_font_css()
# Генерируем PDF
pdf_bytes = pdfkit.from_string(rendered_html, False, options=options)
# Вставляем стили
if "<head>" in rendered_html:
rendered_html = rendered_html.replace("<head>", f"<head>{font_css}")
else:
rendered_html = font_css + rendered_html
with open("debug.html", "w", encoding="utf-8") as f:
f.write(rendered_html)
# Генерируем PDF из debug.html с помощью Playwright
print("[OK] Using Playwright to generate PDF from debug.html")
async def generate_pdf():
async with async_playwright() as p:
browser = await p.chromium.launch()
page = await browser.new_page()
await page.goto(f"file://{os.path.abspath('debug.html')}")
await page.wait_for_load_state("networkidle")
pdf_bytes = await page.pdf(
format="A4",
margin={
"top": "0.75in",
"bottom": "0.75in",
"left": "0.75in",
"right": "0.75in",
},
print_background=True,
)
await browser.close()
return pdf_bytes
pdf_bytes = await generate_pdf()
return pdf_bytes
except Exception as e:
raise Exception(f"Ошибка при генерации PDF: {str(e)}")
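
Playwright renders the PDF with a real Chromium, so browser binaries must be available at runtime (typically installed once via "python -m playwright install chromium"). A small sanity check under that assumption, useful in container builds:

import asyncio

from playwright.async_api import async_playwright


async def check_chromium() -> str:
    async with async_playwright() as p:
        browser = await p.chromium.launch()  # raises if binaries are missing
        version = browser.version
        await browser.close()
        return version


print(asyncio.run(check_chromium()))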
def _prepare_template_data(self, interview_report: InterviewReport) -> dict:
def _prepare_template_data(
self,
interview_report: InterviewReport,
candidate_name: str,
position: str,
resume_file_url: str = None,
) -> dict:
"""Подготавливает данные для HTML шаблона"""
# Основная информация о кандидате
candidate_name = interview_report.resume.applicant_name or "Не указано"
position = "Не указана"
# Получаем название позиции из связанной вакансии
if hasattr(interview_report.resume, 'vacancy') and interview_report.resume.vacancy:
position = interview_report.resume.vacancy.title
# Используем переданные параметры
resume_url = resume_file_url  # Ссылка на резюме приходит параметром
# Форматируем дату интервью
interview_date = "Не указана"
if interview_report.interview_session and interview_report.interview_session.interview_start_time:
interview_date = interview_report.interview_session.interview_start_time.strftime("%d.%m.%Y %H:%M")
if (
interview_report.interview_session
and interview_report.interview_session.interview_start_time
):
interview_date = (
interview_report.interview_session.interview_start_time.strftime(
"%d.%m.%Y %H:%M"
)
)
# Общий балл и рекомендация
overall_score = interview_report.overall_score or 0
recommendation_text, recommendation_class = self._format_recommendation(interview_report.recommendation)
recommendation_text, recommendation_class = self._format_recommendation(
interview_report.recommendation
)
# Сильные стороны и области развития
strengths = self._format_concerns_field(interview_report.strengths_concerns) if interview_report.strengths_concerns else "Не указаны"
areas_for_development = self._format_concerns_field(interview_report.areas_for_development_concerns) if interview_report.areas_for_development_concerns else "Не указаны"
# Сильные стороны и области развития (используем правильные поля модели)
strengths = (
self._format_list_field(interview_report.strengths)
if interview_report.strengths
else "Не указаны"
)
areas_for_development = (
self._format_list_field(interview_report.weaknesses)
if interview_report.weaknesses
else "Не указаны"
)
# Детальная оценка
evaluation_criteria = []
# Детальная оценка: всегда выводим все критерии
evaluation_criteria = [
{
"name": "Технические навыки",
"score": interview_report.technical_skills_score or 0,
"score_class": self._get_score_class(
interview_report.technical_skills_score or 0
),
"justification": interview_report.technical_skills_justification or "",
"concerns": self._format_concerns_field(
interview_report.technical_skills_concerns
),
},
{
"name": "Релевантность опыта",
"score": interview_report.experience_relevance_score or 0,
"score_class": self._get_score_class(
interview_report.experience_relevance_score or 0
),
"justification": interview_report.experience_relevance_justification
or "",
"concerns": self._format_concerns_field(
interview_report.experience_relevance_concerns
),
},
{
"name": "Коммуникация",
"score": interview_report.communication_score or 0,
"score_class": self._get_score_class(
interview_report.communication_score or 0
),
"justification": interview_report.communication_justification or "",
"concerns": self._format_concerns_field(
interview_report.communication_concerns
),
},
{
"name": "Решение задач",
"score": interview_report.problem_solving_score or 0,
"score_class": self._get_score_class(
interview_report.problem_solving_score or 0
),
"justification": interview_report.problem_solving_justification or "",
"concerns": self._format_concerns_field(
interview_report.problem_solving_concerns
),
},
{
"name": "Культурное соответствие",
"score": interview_report.cultural_fit_score or 0,
"score_class": self._get_score_class(
interview_report.cultural_fit_score or 0
),
"justification": interview_report.cultural_fit_justification or "",
"concerns": self._format_concerns_field(
interview_report.cultural_fit_concerns
),
},
]
# Технические навыки
if interview_report.technical_skills_score is not None:
evaluation_criteria.append({
'name': 'Технические навыки',
'score': interview_report.technical_skills_score,
'score_class': self._get_score_class(interview_report.technical_skills_score),
'justification': interview_report.technical_skills_justification or "",
'concerns': self._format_concerns_field(interview_report.technical_skills_concerns)
})
# Красные флаги - используем поле модели напрямую
red_flags = interview_report.red_flags or []
# Релевантность опыта
if interview_report.experience_relevance_score is not None:
evaluation_criteria.append({
'name': 'Релевантность опыта',
'score': interview_report.experience_relevance_score,
'score_class': self._get_score_class(interview_report.experience_relevance_score),
'justification': interview_report.experience_relevance_justification or "",
'concerns': self._format_concerns_field(interview_report.experience_relevance_concerns)
})
# Коммуникация
if interview_report.communication_score is not None:
evaluation_criteria.append({
'name': 'Коммуникация',
'score': interview_report.communication_score,
'score_class': self._get_score_class(interview_report.communication_score),
'justification': interview_report.communication_justification or "",
'concerns': self._format_concerns_field(interview_report.communication_concerns)
})
# Решение задач
if interview_report.problem_solving_score is not None:
evaluation_criteria.append({
'name': 'Решение задач',
'score': interview_report.problem_solving_score,
'score_class': self._get_score_class(interview_report.problem_solving_score),
'justification': interview_report.problem_solving_justification or "",
'concerns': self._format_concerns_field(interview_report.problem_solving_concerns)
})
# Культурное соответствие
if interview_report.cultural_fit_score is not None:
evaluation_criteria.append({
'name': 'Культурное соответствие',
'score': interview_report.cultural_fit_score,
'score_class': self._get_score_class(interview_report.cultural_fit_score),
'justification': interview_report.cultural_fit_justification or "",
'concerns': self._format_concerns_field(interview_report.cultural_fit_concerns)
})
# Красные флаги
red_flags = []
if interview_report.red_flags:
if isinstance(interview_report.red_flags, list):
red_flags = interview_report.red_flags
elif isinstance(interview_report.red_flags, str):
red_flags = [interview_report.red_flags]
# Ссылка на резюме
resume_url = interview_report.resume.file_url if interview_report.resume.file_url else "#"
# Ссылка на резюме (уже определена выше)
# ID отчета
report_id = f"#{interview_report.id}" if interview_report.id else "#0"
@ -189,19 +425,19 @@ class PDFReportService:
generation_date = datetime.now().strftime("%d.%m.%Y %H:%M")
return {
'report_id': report_id,
'candidate_name': candidate_name,
'position': position,
'interview_date': interview_date,
'overall_score': overall_score,
'recommendation_text': recommendation_text,
'recommendation_class': recommendation_class,
'strengths': strengths,
'areas_for_development': areas_for_development,
'evaluation_criteria': evaluation_criteria,
'red_flags': red_flags,
'resume_url': resume_url,
'generation_date': generation_date
"report_id": report_id,
"candidate_name": candidate_name,
"position": position,
"interview_date": interview_date,
"overall_score": overall_score,
"recommendation_text": recommendation_text,
"recommendation_class": recommendation_class,
"strengths": strengths,
"areas_for_development": areas_for_development,
"evaluation_criteria": evaluation_criteria,
"red_flags": red_flags,
"resume_url": resume_url,
"generation_date": generation_date,
}
async def upload_pdf_to_s3(self, pdf_bytes: bytes, filename: str) -> str:
@ -220,10 +456,7 @@ class PDFReportService:
# Загружаем с публичным доступом
file_url = await s3_service.upload_file(
pdf_stream,
filename,
content_type="application/pdf",
public=True
pdf_stream, filename, content_type="application/pdf", public=True
)
return file_url
@ -231,7 +464,13 @@ class PDFReportService:
except Exception as e:
raise Exception(f"Ошибка при загрузке PDF в S3: {str(e)}")
async def generate_and_upload_pdf(self, report: InterviewReport, candidate_name: str = None, position: str = None) -> str:
async def generate_and_upload_pdf(
self,
report: InterviewReport,
candidate_name: str = None,
position: str = None,
resume_file_url: str = None,
) -> str:
"""
Генерирует PDF отчет и загружает его в S3 (метод обратной совместимости)
@ -245,11 +484,20 @@ class PDFReportService:
"""
try:
# Генерируем PDF
pdf_bytes = self.generate_pdf_report(report)
pdf_bytes = await self.generate_pdf_report(
report, candidate_name, position, resume_file_url
)
# Создаем имя файла
safe_name = report.resume.applicant_name or "candidate"
safe_name = "".join(c for c in safe_name if c.isalnum() or c in (' ', '-', '_')).strip()
# Создаем имя файла из переданного имени кандидата
safe_name = (
candidate_name
if candidate_name and candidate_name != "Не указано"
else "candidate"
)
safe_name = "".join(
c for c in safe_name if c.isalnum() or c in (" ", "-", "_")
).strip()
filename = f"interview_report_{safe_name}_{report.id}.pdf"
# Загружаем в S3

View File

@ -2,7 +2,7 @@ import io
import json
import logging
from pathlib import Path
from typing import Any, Dict
from typing import Any
logger = logging.getLogger(__name__)
@ -11,7 +11,7 @@ class VacancyParserService:
"""Сервис для парсинга вакансий из файлов различных форматов"""
def __init__(self):
self.supported_formats = ['.pdf', '.docx', '.rtf', '.txt']
self.supported_formats = [".pdf", ".docx", ".rtf", ".txt"]
def extract_text_from_file(self, file_content: bytes, filename: str) -> str:
"""
@ -27,13 +27,13 @@ class VacancyParserService:
file_extension = Path(filename).suffix.lower()
try:
if file_extension == '.pdf':
if file_extension == ".pdf":
return self._extract_from_pdf(file_content)
elif file_extension == '.docx':
elif file_extension == ".docx":
return self._extract_from_docx(file_content)
elif file_extension == '.rtf':
elif file_extension == ".rtf":
return self._extract_from_rtf(file_content)
elif file_extension == '.txt':
elif file_extension == ".txt":
return self._extract_from_txt(file_content)
else:
raise ValueError(f"Неподдерживаемый формат файла: {file_extension}")
@ -73,7 +73,9 @@ class VacancyParserService:
return text.strip()
except ImportError:
raise ImportError("Требуется установить PyPDF2 или pdfplumber: pip install PyPDF2 pdfplumber")
raise ImportError(
"Требуется установить PyPDF2 или pdfplumber: pip install PyPDF2 pdfplumber"
)
def _extract_from_docx(self, file_content: bytes) -> str:
"""Извлекает текст из DOCX файла"""
@ -97,25 +99,27 @@ class VacancyParserService:
return text.strip()
except ImportError:
raise ImportError("Требуется установить python-docx: pip install python-docx")
raise ImportError(
"Требуется установить python-docx: pip install python-docx"
)
def _extract_from_rtf(self, file_content: bytes) -> str:
"""Извлекает текст из RTF файла"""
try:
from striprtf.striprtf import rtf_to_text
rtf_content = file_content.decode('utf-8', errors='ignore')
rtf_content = file_content.decode("utf-8", errors="ignore")
text = rtf_to_text(rtf_content)
return text.strip()
except ImportError:
raise ImportError("Требуется установить striprtf: pip install striprtf")
except Exception as e:
except Exception:
# Альтернативный метод через pyth
try:
from pyth.plugins.rtf15.reader import Rtf15Reader
from pyth.plugins.plaintext.writer import PlaintextWriter
from pyth.plugins.rtf15.reader import Rtf15Reader
doc = Rtf15Reader.read(io.BytesIO(file_content))
text = PlaintextWriter.write(doc).getvalue()
@ -123,13 +127,15 @@ class VacancyParserService:
return text.strip()
except ImportError:
raise ImportError("Требуется установить striprtf или pyth: pip install striprtf pyth")
raise ImportError(
"Требуется установить striprtf или pyth: pip install striprtf pyth"
)
def _extract_from_txt(self, file_content: bytes) -> str:
"""Извлекает текст из TXT файла"""
try:
# Пробуем различные кодировки
encodings = ['utf-8', 'windows-1251', 'cp1252', 'iso-8859-1']
encodings = ["utf-8", "windows-1251", "cp1252", "iso-8859-1"]
for encoding in encodings:
try:
@ -139,14 +145,14 @@ class VacancyParserService:
continue
# Если все кодировки не подошли, используем errors='ignore'
text = file_content.decode('utf-8', errors='ignore')
text = file_content.decode("utf-8", errors="ignore")
return text.strip()
except Exception as e:
logger.error(f"Ошибка при чтении txt файла: {str(e)}")
raise
async def parse_vacancy_with_ai(self, raw_text: str) -> Dict[str, Any]:
async def parse_vacancy_with_ai(self, raw_text: str) -> dict[str, Any]:
"""
Парсит текст вакансии с помощью AI для извлечения структурированной информации
@ -225,38 +231,39 @@ class VacancyParserService:
logger.error(f"Ошибка при парсинге вакансии через AI: {str(e)}")
raise
def _validate_parsed_data(self, data: Dict[str, Any]) -> Dict[str, Any]:
def _validate_parsed_data(self, data: dict[str, Any]) -> dict[str, Any]:
"""Валидирует и очищает спарсенные данные"""
from app.models.vacancy import EmploymentType, Experience, Schedule
# Обязательные поля с дефолтными значениями
validated_data = {
'title': data.get('title', 'Название не указано'),
'description': data.get('description', 'Описание не указано'),
'key_skills': data.get('key_skills'),
'employment_type': self._validate_enum(
data.get('employment_type'),
EmploymentType,
EmploymentType.FULL_TIME
"title": data.get("title", "Название не указано"),
"description": data.get("description", "Описание не указано"),
"key_skills": data.get("key_skills"),
"employment_type": self._validate_enum(
data.get("employment_type"), EmploymentType, EmploymentType.FULL_TIME
),
'experience': self._validate_enum(
data.get('experience'),
Experience,
Experience.BETWEEN_1_AND_3
"experience": self._validate_enum(
data.get("experience"), Experience, Experience.BETWEEN_1_AND_3
),
'schedule': self._validate_enum(
data.get('schedule'),
Schedule,
Schedule.FULL_DAY
"schedule": self._validate_enum(
data.get("schedule"), Schedule, Schedule.FULL_DAY
),
'company_name': data.get('company_name'),
'area_name': data.get('area_name'),
"company_name": data.get("company_name"),
"area_name": data.get("area_name"),
}
# Необязательные поля
optional_fields = [
'salary_from', 'salary_to', 'salary_currency', 'company_description',
'address', 'professional_roles', 'contacts_name', 'contacts_email', 'contacts_phone'
"salary_from",
"salary_to",
"salary_currency",
"company_description",
"address",
"professional_roles",
"contacts_name",
"contacts_email",
"contacts_phone",
]
for field in optional_fields:
@ -265,20 +272,20 @@ class VacancyParserService:
validated_data[field] = value
# Специальная обработка зарплаты
if data.get('salary_from'):
if data.get("salary_from"):
try:
validated_data['salary_from'] = int(data['salary_from'])
validated_data["salary_from"] = int(data["salary_from"])
except (ValueError, TypeError):
pass
if data.get('salary_to'):
if data.get("salary_to"):
try:
validated_data['salary_to'] = int(data['salary_to'])
validated_data["salary_to"] = int(data["salary_to"])
except (ValueError, TypeError):
pass
# Валюта по умолчанию
validated_data['salary_currency'] = data.get('salary_currency', 'RUR')
validated_data["salary_currency"] = data.get("salary_currency", "RUR")
return validated_data
@ -291,7 +298,9 @@ class VacancyParserService:
try:
return enum_class(value)
except ValueError:
logger.warning(f"Неизвестное значение {value} для {enum_class.__name__}, используем {default_value}")
logger.warning(
f"Неизвестное значение {value} для {enum_class.__name__}, используем {default_value}"
)
return default_value

View File

@ -143,21 +143,20 @@ class SyncVacancyRepository:
def create_vacancy(self, vacancy_create):
"""Создать новую вакансию"""
from datetime import datetime
from app.models.vacancy import Vacancy
# Конвертируем VacancyCreate в dict
if hasattr(vacancy_create, 'dict'):
if hasattr(vacancy_create, "dict"):
vacancy_data = vacancy_create.dict()
elif hasattr(vacancy_create, 'model_dump'):
elif hasattr(vacancy_create, "model_dump"):
vacancy_data = vacancy_create.model_dump()
else:
vacancy_data = vacancy_create
# Создаем новую вакансию
vacancy = Vacancy(
**vacancy_data,
created_at=datetime.utcnow(),
updated_at=datetime.utcnow()
**vacancy_data, created_at=datetime.utcnow(), updated_at=datetime.utcnow()
)
self.session.add(vacancy)
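Note that the hasattr chain tries `.dict()` first, and pydantic v2 models still expose it as a deprecated alias, so v2 models never reach the `.model_dump()` branch. A sketch of an order-safe variant with the same fallbacks:

# Сначала API pydantic v2, затем v1, затем обычный dict.
if hasattr(vacancy_create, "model_dump"):
    vacancy_data = vacancy_create.model_dump()
elif hasattr(vacancy_create, "dict"):
    vacancy_data = vacancy_create.dict()
else:
    vacancy_data = vacancy_create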
@@ -171,3 +170,41 @@ class SyncVacancyRepository:
self.title = title
return VacancyResult(vacancy.id, vacancy.title)
class SyncInterviewReportRepository:
"""Синхронный repository для работы с InterviewReport в Celery tasks"""
def __init__(self, session: Session):
self.session = session
def get_by_id(self, report_id: int):
"""Получить отчет по ID"""
from app.models.interview_report import InterviewReport
return (
self.session.query(InterviewReport)
.filter(InterviewReport.id == report_id)
.first()
)
def update_pdf_url(self, report_id: int, pdf_url: str) -> bool:
"""Обновить ссылку на PDF отчёта"""
from datetime import datetime
from app.models.interview_report import InterviewReport
try:
report = (
self.session.query(InterviewReport)
.filter(InterviewReport.id == report_id)
.first()
)
if report:
report.pdf_report_url = pdf_url
report.updated_at = datetime.utcnow()
self.session.add(report)
return True
return False
except Exception:
return False
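A usage sketch for this repository from a Celery task (the id and URL are illustrative; assumes `get_sync_session` commits on exit, matching how it is used elsewhere in celery_worker.database):

with get_sync_session() as session:
    repo = SyncInterviewReportRepository(session)
    ok = repo.update_pdf_url(report_id=42, pdf_url="https://s3.example/report_42.pdf")
    if not ok:
        logger.warning("Отчет 42 не найден, ссылка на PDF не сохранена")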

View File

@@ -108,6 +108,7 @@ def generate_interview_report(resume_id: int):
report_instance,
resume.applicant_name,
vacancy.get("title", "Unknown Position"),
resume.resume_file_url,
)
)
@@ -321,8 +322,6 @@ def _prepare_analysis_context(
# Формируем контекст
context = f"""
АНАЛИЗ КАНДИДАТА НА СОБЕСЕДОВАНИЕ
ВАКАНСИЯ:
- Позиция: {vacancy.get("title", "Не указана")}
- Описание: {vacancy.get("description", "Не указано")[:500]}
@@ -337,10 +336,10 @@ def _prepare_analysis_context(
- Образование: {parsed_resume.get("education", "Не указано")}
- Предыдущие позиции: {"; ".join([pos.get("title", "") + " в " + pos.get("company", "") for pos in parsed_resume.get("work_experience", [])])}
ПЛАН ИНТЕРВЬЮ:
ПЛАН СОБЕСЕДОВАНИЯ:
{json.dumps(interview_plan, ensure_ascii=False, indent=2) if interview_plan else "План интервью не найден"}
ДИАЛОГ ИНТЕРВЬЮ:
ДИАЛОГ СОБЕСЕДОВАНИЯ:
{dialogue_text if dialogue_text else "Диалог интервью не найден или пуст"}
"""
@@ -363,11 +362,15 @@ def _call_openai_for_evaluation(context: str) -> dict | None:
{context}
ЗАДАЧА:
Проанализируй кандидата и дай оценку по критериям (0-100):
1. technical_skills: Соответствие техническим требованиям
2. experience_relevance: Релевантность опыта
3. communication: Коммуникативные навыки (на основе диалога)
4. problem_solving: Навыки решения задач
Проанализируй ДИАЛОГ с кандидатом. Только если кандидат ответил на вопросы и подтвердил знания из резюме, навыки из резюме считаются подтвержденными
и их можно оценивать на соответствие требованиям вакансии. Если кандидат уклонялся от вопросов или закончил интервью раньше, чем это сделал сам интервьюер
(или диалог выглядит неполным исходя из плана, хотя интервьюер адаптирует план и строго ему не следует), то навыки не считаются подтвержденными и по ним нельзя оценивать кандидата.
Дай оценку по критериям (0-100):
1. technical_skills: Соответствие диалога (и резюме, если диалог его подтверждает) техническим требованиям вакансии
2. experience_relevance: Релевантность опыта судя по диалогу (и резюме, если диалог его подтверждает)
3. communication: Коммуникативные навыки на основе диалога
4. problem_solving: Навыки решения задач на основе диалога
5. cultural_fit: Соответствие корпоративной культуре
Для каждого критерия:
@@ -587,19 +590,27 @@ def _save_report_to_db(db, resume_id: int, report: dict):
async def _generate_and_upload_pdf_report(
db, report_instance: "InterviewReport", candidate_name: str, position: str
db,
report_instance: "InterviewReport",
candidate_name: str,
position: str,
resume_file_url: str = None,
):
"""Генерирует PDF отчет и загружает его в S3"""
try:
from app.services.pdf_report_service import pdf_report_service
logger.info(
f"[PDF_GENERATION] Starting PDF generation for report ID: {report_instance.id}"
)
# Генерируем и загружаем PDF
        # Генерируем и загружаем PDF, передавая данные кандидата и ссылку на резюме
pdf_url = await pdf_report_service.generate_and_upload_pdf(
report=report_instance, candidate_name=candidate_name, position=position
report=report_instance,
candidate_name=candidate_name,
position=position,
resume_file_url=resume_file_url,
)
if pdf_url:

View File

@@ -581,7 +581,9 @@ def generate_interview_questions_task(self, resume_id: str, job_description: str
@celery_app.task(bind=True)
def parse_vacancy_task(self, file_content_base64: str, filename: str, create_vacancy: bool = False):
def parse_vacancy_task(
self, file_content_base64: str, filename: str, create_vacancy: bool = False
):
"""
Асинхронная задача парсинга вакансии из файла
@@ -592,13 +594,14 @@ def parse_vacancy_task(self, file_content_base64: str, filename: str, create_vac
"""
try:
import base64
from app.services.vacancy_parser_service import vacancy_parser_service
from app.models.vacancy import VacancyCreate
from app.services.vacancy_parser_service import vacancy_parser_service
# Обновляем статус задачи
self.update_state(
state="PENDING",
meta={"status": "Начинаем парсинг вакансии...", "progress": 10}
meta={"status": "Начинаем парсинг вакансии...", "progress": 10},
)
# Декодируем содержимое файла
@@ -607,7 +610,7 @@ def parse_vacancy_task(self, file_content_base64: str, filename: str, create_vac
# Шаг 1: Извлечение текста из файла
self.update_state(
state="PROGRESS",
meta={"status": "Извлекаем текст из файла...", "progress": 30}
meta={"status": "Извлекаем текст из файла...", "progress": 30},
)
raw_text = vacancy_parser_service.extract_text_from_file(file_content, filename)
@@ -618,20 +621,25 @@ def parse_vacancy_task(self, file_content_base64: str, filename: str, create_vac
# Шаг 2: Парсинг с помощью AI
self.update_state(
state="PROGRESS",
meta={"status": "Обрабатываем текст с помощью AI...", "progress": 70}
meta={"status": "Обрабатываем текст с помощью AI...", "progress": 70},
)
import asyncio
parsed_data = asyncio.run(vacancy_parser_service.parse_vacancy_with_ai(raw_text))
parsed_data = asyncio.run(
vacancy_parser_service.parse_vacancy_with_ai(raw_text)
)
# Шаг 3: Создание вакансии (если требуется)
created_vacancy = None
print(f"create_vacancy parameter: {create_vacancy}, type: {type(create_vacancy)}")
print(
f"create_vacancy parameter: {create_vacancy}, type: {type(create_vacancy)}"
)
if create_vacancy:
self.update_state(
state="PROGRESS",
meta={"status": "Создаем вакансию в базе данных...", "progress": 90}
meta={"status": "Создаем вакансию в базе данных...", "progress": 90},
)
try:
@@ -642,10 +650,13 @@ def parse_vacancy_task(self, file_content_base64: str, filename: str, create_vac
with get_sync_session() as session:
vacancy_repo = SyncVacancyRepository(session)
created_vacancy = vacancy_repo.create_vacancy(vacancy_create)
print(f"Vacancy created with ID: {created_vacancy.id if created_vacancy else 'None'}")
print(
f"Vacancy created with ID: {created_vacancy.id if created_vacancy else 'None'}"
)
except Exception as e:
import traceback
error_details = traceback.format_exc()
print(f"Error creating vacancy: {str(e)}")
print(f"Full traceback: {error_details}")
@@ -657,14 +668,14 @@ def parse_vacancy_task(self, file_content_base64: str, filename: str, create_vac
"status": f"Парсинг выполнен, но ошибка при создании вакансии: {str(e)}",
"progress": 100,
"result": parsed_data,
"warning": f"Ошибка создания вакансии: {str(e)}"
}
"warning": f"Ошибка создания вакансии: {str(e)}",
},
)
return {
"status": "parsed_with_warning",
"parsed_data": parsed_data,
"warning": f"Ошибка при создании вакансии: {str(e)}"
"warning": f"Ошибка при создании вакансии: {str(e)}",
}
# Завершено успешно
@@ -678,15 +689,15 @@ def parse_vacancy_task(self, file_content_base64: str, filename: str, create_vac
"status": response_message,
"progress": 100,
"result": parsed_data,
"vacancy_id": created_vacancy.id if created_vacancy else None
}
"vacancy_id": created_vacancy.id if created_vacancy else None,
},
)
return {
"status": "completed",
"parsed_data": parsed_data,
"vacancy_id": created_vacancy.id if created_vacancy else None,
"message": response_message
"message": response_message,
}
except Exception as e:
@@ -696,8 +707,150 @@ def parse_vacancy_task(self, file_content_base64: str, filename: str, create_vac
meta={
"status": f"Ошибка при парсинге вакансии: {str(e)}",
"progress": 0,
"error": str(e)
}
"error": str(e),
},
)
raise Exception(f"Ошибка при парсинге вакансии: {str(e)}")
@celery_app.task(bind=True)
def generate_pdf_report_task(
self,
report_data: dict,
candidate_name: str = None,
position: str = None,
resume_file_url: str = None,
):
"""
Асинхронная задача для генерации PDF отчета по интервью
Args:
report_data: Словарь с данными отчета InterviewReport
candidate_name: Имя кандидата
position: Позиция
resume_file_url: URL резюме
"""
try:
import asyncio
from app.models.interview_report import InterviewReport
from app.services.pdf_report_service import pdf_report_service
from celery_worker.database import (
SyncInterviewReportRepository,
get_sync_session,
)
# Обновляем статус задачи
self.update_state(
state="PENDING",
meta={"status": "Начинаем генерацию PDF отчета...", "progress": 10},
)
# Создаем объект InterviewReport из переданных данных
self.update_state(
state="PROGRESS",
meta={"status": "Подготавливаем данные отчета...", "progress": 20},
)
# Подготавливаем данные для создания объекта
clean_report_data = report_data.copy()
# Обрабатываем datetime поля - убираем их, так как они не нужны для создания mock объекта
        clean_report_data.pop("created_at", None)
        clean_report_data.pop("updated_at", None)
# Создаем объект InterviewReport с обработанными данными
mock_report = InterviewReport(**clean_report_data)
# Генерируем PDF
self.update_state(
state="PROGRESS", meta={"status": "Генерируем PDF отчет...", "progress": 40}
)
# Запускаем асинхронную функцию в новом цикле событий
def run_pdf_generation():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
return loop.run_until_complete(
pdf_report_service.generate_pdf_report(
mock_report, candidate_name, position, resume_file_url
)
)
finally:
loop.close()
pdf_bytes = run_pdf_generation()
# Загружаем в S3
self.update_state(
state="PROGRESS",
meta={"status": "Загружаем PDF в хранилище...", "progress": 80},
)
def run_s3_upload():
loop = asyncio.new_event_loop()
asyncio.set_event_loop(loop)
try:
# Создаем имя файла
safe_name = (
candidate_name
if candidate_name and candidate_name != "Не указано"
else "candidate"
)
safe_name = "".join(
c for c in safe_name if c.isalnum() or c in (" ", "-", "_")
).strip()
report_id = report_data.get("id")
filename = f"interview_report_{safe_name}_{report_id}.pdf"
return loop.run_until_complete(
pdf_report_service.upload_pdf_to_s3(pdf_bytes, filename)
)
finally:
loop.close()
pdf_url = run_s3_upload()
# Обновляем отчет с URL PDF файла
self.update_state(
state="PROGRESS",
meta={"status": "Сохраняем ссылку на отчет...", "progress": 90},
)
report_id = report_data.get("id")
with get_sync_session() as session:
report_repo = SyncInterviewReportRepository(session)
report_repo.update_pdf_url(report_id, pdf_url)
# Завершено успешно
self.update_state(
state="SUCCESS",
meta={
"status": "PDF отчет успешно сгенерирован",
"progress": 100,
"pdf_url": pdf_url,
"file_size": len(pdf_bytes),
},
)
return {
"interview_report_id": report_id,
"status": "completed",
"pdf_url": pdf_url,
"file_size": len(pdf_bytes),
}
except Exception as e:
# В случае ошибки
self.update_state(
state="FAILURE",
meta={
"status": f"Ошибка при генерации PDF: {str(e)}",
"progress": 0,
"error": str(e),
},
)
raise Exception(f"Ошибка при генерации PDF: {str(e)}")

View File

@@ -1,3 +1,5 @@
import asyncio
import sys
from contextlib import asynccontextmanager
from fastapi import FastAPI
@@ -7,9 +9,9 @@ from app.core.session_middleware import SessionMiddleware
from app.routers import resume_router, vacancy_router
from app.routers.admin_router import router as admin_router
from app.routers.analysis_router import router as analysis_router
from app.routers.interview_reports_router import router as interview_report_router
from app.routers.interview_router import router as interview_router
from app.routers.session_router import router as session_router
from app.routers.interview_reports_router import router as interview_report_router
@asynccontextmanager
@@ -17,6 +19,9 @@ async def lifespan(app: FastAPI):
# Запускаем AI агента при старте приложения
from app.services.agent_manager import agent_manager
if sys.platform.startswith("win"):
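        # Предположительно, Proactor-цикл нужен здесь для поддержки подпроцессов
        # asyncio на Windows (например, Playwright запускает браузер как подпроцесс)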
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
print("[STARTUP] Starting AI Agent...")
success = await agent_manager.start_agent()
@@ -59,6 +64,7 @@ app.include_router(analysis_router, prefix="/api/v1")
app.include_router(admin_router, prefix="/api/v1")
app.include_router(interview_report_router, prefix="/api/v1")
@app.get("/")
async def root():
return {"message": "HR AI Backend API", "version": "1.0.0"}

View File

@@ -0,0 +1,52 @@
"""Add interview session resume relationship and timing fields
Revision ID: efeebe53c76c
Revises: 86cfa6ee73af
Create Date: 2025-09-09 00:13:58.304145
"""
from collections.abc import Sequence
import sqlalchemy as sa
from alembic import op
# revision identifiers, used by Alembic.
revision: str = "efeebe53c76c"
down_revision: str | Sequence[str] | None = "86cfa6ee73af"
branch_labels: str | Sequence[str] | None = None
depends_on: str | Sequence[str] | None = None
def upgrade() -> None:
"""Upgrade schema."""
# ### commands auto generated by Alembic - please adjust! ###
op.add_column(
"interview_sessions",
sa.Column("interview_start_time", sa.DateTime(), nullable=True),
)
op.add_column(
"interview_sessions",
sa.Column("interview_end_time", sa.DateTime(), nullable=True),
)
op.alter_column(
"vacancy", "company_name", existing_type=sa.VARCHAR(length=255), nullable=True
)
op.alter_column(
"vacancy", "area_name", existing_type=sa.VARCHAR(length=255), nullable=True
)
# ### end Alembic commands ###
def downgrade() -> None:
"""Downgrade schema."""
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column(
"vacancy", "area_name", existing_type=sa.VARCHAR(length=255), nullable=False
)
op.alter_column(
"vacancy", "company_name", existing_type=sa.VARCHAR(length=255), nullable=False
)
op.drop_column("interview_sessions", "interview_end_time")
op.drop_column("interview_sessions", "interview_start_time")
# ### end Alembic commands ###
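Since the revision chain is linear ("86cfa6ee73af" -> "efeebe53c76c"), applying or reverting this migration is the standard Alembic invocation:

alembic upgrade efeebe53c76c
alembic downgrade 86cfa6ee73af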

View File

@@ -34,6 +34,8 @@ dependencies = [
"pdfkit>=1.0.0",
"jinja2>=3.1.6",
"greenlet>=3.2.4",
"xhtml2pdf>=0.2.17",
"playwright>=1.55.0",
]
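The playwright package on PyPI ships only the Python driver; the browser binaries it controls are a separate, likely required post-install step (assuming the repo's uv workflow):

uv run playwright install chromium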
[build-system]

View File

@@ -31,9 +31,7 @@ class ModelRegistry:
"""Получить или создать chat модель"""
if self._chat_model is None:
if settings.openai_api_key:
llm = ChatOpenAI(
api_key=settings.openai_api_key, model="gpt-5-mini"
)
llm = ChatOpenAI(api_key=settings.openai_api_key, model="gpt-5-mini")
self._chat_model = ChatModel(llm)
else:
raise ValueError("OpenAI API key не настроен в settings")

BIN
static/fonts/DejaVuSans.ttf Normal file

Binary file not shown.

uv.lock (260 lines changed)
View File

@@ -154,6 +154,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/6f/12/e5e0282d673bb9746bacfb6e2dba8719989d3660cdb2ea79aee9a9651afb/anyio-4.10.0-py3-none-any.whl", hash = "sha256:60e474ac86736bbfd6f210f7a61218939c318f43f9972497381f1c5e930ed3d1", size = 107213 },
]
[[package]]
name = "arabic-reshaper"
version = "3.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/29/27/9f488e21f87fd8b7ff3b52c372b9510c619ecf1398e4ba30d5f4becc7d86/arabic_reshaper-3.0.0.tar.gz", hash = "sha256:ffcd13ba5ec007db71c072f5b23f420da92ac7f268512065d49e790e62237099", size = 23420 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/44/fb/e20b45d81d74d810b01bff408baf8af04abf1d55a1a289c8395ad0919a7c/arabic_reshaper-3.0.0-py3-none-any.whl", hash = "sha256:3f71d5034bb694204a239a6f1ebcf323ac3c5b059de02259235e2016a1a5e2dc", size = 20364 },
]
[[package]]
name = "argcomplete"
version = "3.6.2"
@@ -163,6 +172,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/31/da/e42d7a9d8dd33fa775f467e4028a47936da2f01e4b0e561f9ba0d74cb0ca/argcomplete-3.6.2-py3-none-any.whl", hash = "sha256:65b3133a29ad53fb42c48cf5114752c7ab66c1c38544fdf6460f450c09b42591", size = 43708 },
]
[[package]]
name = "asn1crypto"
version = "1.5.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/de/cf/d547feed25b5244fcb9392e288ff9fdc3280b10260362fc45d37a798a6ee/asn1crypto-1.5.1.tar.gz", hash = "sha256:13ae38502be632115abf8a24cbe5f4da52e3b5231990aff31123c805306ccb9c", size = 121080 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c9/7f/09065fd9e27da0eda08b4d6897f1c13535066174cc023af248fc2a8d5e5a/asn1crypto-1.5.1-py2.py3-none-any.whl", hash = "sha256:db4e40728b728508912cbb3d44f19ce188f218e9eba635821bb4b68564f8fd67", size = 105045 },
]
[[package]]
name = "async-timeout"
version = "5.0.1"
@@ -617,6 +635,19 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/23/87/7ce86f3fa14bc11a5a48c30d8103c26e09b6465f8d8e9d74cf7a0714f043/cryptography-45.0.7-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:1f3d56f73595376f4244646dd5c5870c14c196949807be39e79e7bd9bac3da63", size = 3332908 },
]
[[package]]
name = "cssselect2"
version = "0.8.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "tinycss2" },
{ name = "webencodings" },
]
sdist = { url = "https://files.pythonhosted.org/packages/9f/86/fd7f58fc498b3166f3a7e8e0cddb6e620fe1da35b02248b1bd59e95dbaaa/cssselect2-0.8.0.tar.gz", hash = "sha256:7674ffb954a3b46162392aee2a3a0aedb2e14ecf99fcc28644900f4e6e3e9d3a", size = 35716 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/0f/e7/aa315e6a749d9b96c2504a1ba0ba031ba2d0517e972ce22682e3fccecb09/cssselect2-0.8.0-py3-none-any.whl", hash = "sha256:46fc70ebc41ced7a32cd42d58b1884d72ade23d21e5a4eaaf022401c13f0e76e", size = 15454 },
]
[[package]]
name = "dataclasses-json"
version = "0.6.7"
@@ -995,6 +1026,7 @@ dependencies = [
{ name = "comtypes" },
{ name = "docx2txt" },
{ name = "fastapi", extra = ["standard"] },
{ name = "greenlet" },
{ name = "jinja2" },
{ name = "langchain" },
{ name = "langchain-community" },
@@ -1006,6 +1038,7 @@ dependencies = [
{ name = "livekit-api" },
{ name = "pdfkit" },
{ name = "pdfplumber" },
{ name = "playwright" },
{ name = "psycopg2-binary" },
{ name = "pydantic-settings" },
{ name = "python-docx" },
@@ -1016,6 +1049,7 @@ dependencies = [
{ name = "sqlmodel" },
{ name = "textract" },
{ name = "uvicorn", extra = ["standard"] },
{ name = "xhtml2pdf" },
{ name = "yandex-speechkit" },
]
@@ -1037,6 +1071,7 @@ requires-dist = [
{ name = "comtypes", specifier = ">=1.4.12" },
{ name = "docx2txt", specifier = ">=0.9" },
{ name = "fastapi", extras = ["standard"], specifier = ">=0.104.0" },
{ name = "greenlet", specifier = ">=3.2.4" },
{ name = "jinja2", specifier = ">=3.1.6" },
{ name = "langchain", specifier = ">=0.1.0" },
{ name = "langchain-community", specifier = ">=0.0.10" },
@@ -1048,6 +1083,7 @@ requires-dist = [
{ name = "livekit-api", specifier = ">=1.0.5" },
{ name = "pdfkit", specifier = ">=1.0.0" },
{ name = "pdfplumber", specifier = ">=0.10.0" },
{ name = "playwright", specifier = ">=1.55.0" },
{ name = "psycopg2-binary", specifier = ">=2.9.0" },
{ name = "pydantic-settings", specifier = ">=2.1.0" },
{ name = "python-docx", specifier = ">=1.2.0" },
@@ -1058,6 +1094,7 @@ requires-dist = [
{ name = "sqlmodel", specifier = ">=0.0.14" },
{ name = "textract", specifier = ">=1.5.0" },
{ name = "uvicorn", extras = ["standard"], specifier = ">=0.24.0" },
{ name = "xhtml2pdf", specifier = ">=0.2.17" },
{ name = "yandex-speechkit", specifier = ">=1.5.0" },
]
@@ -1070,6 +1107,19 @@ dev = [
{ name = "ruff", specifier = ">=0.12.12" },
]
[[package]]
name = "html5lib"
version = "1.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "six" },
{ name = "webencodings" },
]
sdist = { url = "https://files.pythonhosted.org/packages/ac/b6/b55c3f49042f1df3dcd422b7f224f939892ee94f22abcf503a9b7339eaf2/html5lib-1.1.tar.gz", hash = "sha256:b2e5b40261e20f354d198eae92afc10d750afb487ed5e50f9c4eaf07c184146f", size = 272215 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/6c/dd/a834df6482147d48e225a49515aabc28974ad5a4ca3215c18a882565b028/html5lib-1.1-py2.py3-none-any.whl", hash = "sha256:0d78f8fde1c230e99fe37986a60526d7049ed4bf8a9fadbad5f00e22e58e041d", size = 112173 },
]
[[package]]
name = "httpcore"
version = "1.0.9"
@@ -2288,6 +2338,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/28/01/d6b274a0635be0468d4dbd9cafe80c47105937a0d42434e805e67cd2ed8b/orjson-3.11.3-cp314-cp314-win_arm64.whl", hash = "sha256:e8f6a7a27d7b7bec81bd5924163e9af03d49bbb63013f107b48eb5d16db711bc", size = 125985 },
]
[[package]]
name = "oscrypto"
version = "1.3.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "asn1crypto" },
]
sdist = { url = "https://files.pythonhosted.org/packages/06/81/a7654e654a4b30eda06ef9ad8c1b45d1534bfd10b5c045d0c0f6b16fecd2/oscrypto-1.3.0.tar.gz", hash = "sha256:6f5fef59cb5b3708321db7cca56aed8ad7e662853351e7991fcf60ec606d47a4", size = 184590 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/01/7c/fa07d3da2b6253eb8474be16eab2eadf670460e364ccc895ca7ff388ee30/oscrypto-1.3.0-py2.py3-none-any.whl", hash = "sha256:2b2f1d2d42ec152ca90ccb5682f3e051fb55986e1b170ebde472b133713e7085", size = 194553 },
]
[[package]]
name = "packaging"
version = "25.0"
@@ -2467,6 +2529,25 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/34/e7/ae39f538fd6844e982063c3a5e4598b8ced43b9633baa3a85ef33af8c05c/pillow-11.3.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:c84d689db21a1c397d001aa08241044aa2069e7587b398c8cc63020390b1c1b8", size = 6984598 },
]
[[package]]
name = "playwright"
version = "1.55.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "greenlet" },
{ name = "pyee" },
]
wheels = [
{ url = "https://files.pythonhosted.org/packages/80/3a/c81ff76df266c62e24f19718df9c168f49af93cabdbc4608ae29656a9986/playwright-1.55.0-py3-none-macosx_10_13_x86_64.whl", hash = "sha256:d7da108a95001e412effca4f7610de79da1637ccdf670b1ae3fdc08b9694c034", size = 40428109 },
{ url = "https://files.pythonhosted.org/packages/cf/f5/bdb61553b20e907196a38d864602a9b4a461660c3a111c67a35179b636fa/playwright-1.55.0-py3-none-macosx_11_0_arm64.whl", hash = "sha256:8290cf27a5d542e2682ac274da423941f879d07b001f6575a5a3a257b1d4ba1c", size = 38687254 },
{ url = "https://files.pythonhosted.org/packages/4a/64/48b2837ef396487807e5ab53c76465747e34c7143fac4a084ef349c293a8/playwright-1.55.0-py3-none-macosx_11_0_universal2.whl", hash = "sha256:25b0d6b3fd991c315cca33c802cf617d52980108ab8431e3e1d37b5de755c10e", size = 40428108 },
{ url = "https://files.pythonhosted.org/packages/08/33/858312628aa16a6de97839adc2ca28031ebc5391f96b6fb8fdf1fcb15d6c/playwright-1.55.0-py3-none-manylinux1_x86_64.whl", hash = "sha256:c6d4d8f6f8c66c483b0835569c7f0caa03230820af8e500c181c93509c92d831", size = 45905643 },
{ url = "https://files.pythonhosted.org/packages/83/83/b8d06a5b5721931aa6d5916b83168e28bd891f38ff56fe92af7bdee9860f/playwright-1.55.0-py3-none-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:29a0777c4ce1273acf90c87e4ae2fe0130182100d99bcd2ae5bf486093044838", size = 45296647 },
{ url = "https://files.pythonhosted.org/packages/06/2e/9db64518aebcb3d6ef6cd6d4d01da741aff912c3f0314dadb61226c6a96a/playwright-1.55.0-py3-none-win32.whl", hash = "sha256:29e6d1558ad9d5b5c19cbec0a72f6a2e35e6353cd9f262e22148685b86759f90", size = 35476046 },
{ url = "https://files.pythonhosted.org/packages/46/4f/9ba607fa94bb9cee3d4beb1c7b32c16efbfc9d69d5037fa85d10cafc618b/playwright-1.55.0-py3-none-win_amd64.whl", hash = "sha256:7eb5956473ca1951abb51537e6a0da55257bb2e25fc37c2b75af094a5c93736c", size = 35476048 },
{ url = "https://files.pythonhosted.org/packages/21/98/5ca173c8ec906abde26c28e1ecb34887343fd71cc4136261b90036841323/playwright-1.55.0-py3-none-win_arm64.whl", hash = "sha256:012dc89ccdcbd774cdde8aeee14c08e0dd52ddb9135bf10e9db040527386bd76", size = 31225543 },
]
[[package]]
name = "pluggy"
version = "1.6.0"
@@ -2759,6 +2840,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/a6/53/d78dc063216e62fc55f6b2eebb447f6a4b0a59f55c8406376f76bf959b08/pydub-0.25.1-py2.py3-none-any.whl", hash = "sha256:65617e33033874b59d87db603aa1ed450633288aefead953b30bded59cb599a6", size = 32327 },
]
[[package]]
name = "pyee"
version = "13.0.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "typing-extensions" },
]
sdist = { url = "https://files.pythonhosted.org/packages/95/03/1fd98d5841cd7964a27d729ccf2199602fe05eb7a405c1462eb7277945ed/pyee-13.0.0.tar.gz", hash = "sha256:b391e3c5a434d1f5118a25615001dbc8f669cf410ab67d04c4d4e07c55481c37", size = 31250 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/9b/4d/b9add7c84060d4c1906abe9a7e5359f2a60f7a9a4f67268b2766673427d8/pyee-13.0.0-py3-none-any.whl", hash = "sha256:48195a3cddb3b1515ce0695ed76036b5ccc2ef3a9f963ff9f77aec0139845498", size = 15730 },
]
[[package]]
name = "pygments"
version = "2.19.2"
@@ -2768,6 +2861,40 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/c7/21/705964c7812476f378728bdf590ca4b771ec72385c533964653c68e86bdc/pygments-2.19.2-py3-none-any.whl", hash = "sha256:86540386c03d588bb81d44bc3928634ff26449851e99741617ecb9037ee5ec0b", size = 1225217 },
]
[[package]]
name = "pyhanko"
version = "0.30.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "asn1crypto" },
{ name = "cryptography" },
{ name = "lxml" },
{ name = "pyhanko-certvalidator" },
{ name = "pyyaml" },
{ name = "requests" },
{ name = "tzlocal" },
]
sdist = { url = "https://files.pythonhosted.org/packages/4c/c4/06672abd225149dde9302d64e8962abc2b5aca4bba4c50388005fa32ab90/pyhanko-0.30.0.tar.gz", hash = "sha256:efaa9e5401d4912fa5b2aeb4cdbe729196d98dae0671bd6d37a824dc6fde5ca4", size = 405860 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/6f/73/c1b4f69d25ab00552a49d180783b7de29a8313b30a31049028f54a01ac69/pyhanko-0.30.0-py3-none-any.whl", hash = "sha256:cd65837b42c5ce3fbd88d1996b0cd44895cd634fc7cf12764b9b56ec100b9994", size = 465232 },
]
[[package]]
name = "pyhanko-certvalidator"
version = "0.28.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "asn1crypto" },
{ name = "cryptography" },
{ name = "oscrypto" },
{ name = "requests" },
{ name = "uritools" },
]
sdist = { url = "https://files.pythonhosted.org/packages/f9/be/9ffcb4cb17f223589579b3dd005d1004e9586d58730c8bbc688ffd563e19/pyhanko_certvalidator-0.28.0.tar.gz", hash = "sha256:6b2911520a3e9cf24a640f67488fadac82ad3818f4256ddfb7e8fa1fada80f2d", size = 93049 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/61/3c/d4a5f18d21962c8c3626c3c4d7a8774a3d649923c18e717f7f1ca471c126/pyhanko_certvalidator-0.28.0-py3-none-any.whl", hash = "sha256:37d02f61974175843ce36b467c0d9d7eae78caa6e356beeb753360c351494dc2", size = 111617 },
]
[[package]]
name = "pyjwt"
version = "2.10.1"
@@ -2795,6 +2922,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d4/1a/8b677e0f4ef683bbfb00d495960573fff0844ed509b3cf0abede79a48e90/pymilvus-2.6.1-py3-none-any.whl", hash = "sha256:e3d76d45ce04d3555a6849645a18a1e2992706e248d5b6dc58a00504d0b60165", size = 254252 },
]
[[package]]
name = "pypdf"
version = "6.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/20/ac/a300a03c3b34967c050677ccb16e7a4b65607ee5df9d51e8b6d713de4098/pypdf-6.0.0.tar.gz", hash = "sha256:282a99d2cc94a84a3a3159f0d9358c0af53f85b4d28d76ea38b96e9e5ac2a08d", size = 5033827 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/2c/83/2cacc506eb322bb31b747bc06ccb82cc9aa03e19ee9c1245e538e49d52be/pypdf-6.0.0-py3-none-any.whl", hash = "sha256:56ea60100ce9f11fc3eec4f359da15e9aec3821b036c1f06d2b660d35683abb8", size = 310465 },
]
[[package]]
name = "pypdfium2"
version = "4.30.0"
@@ -2852,6 +2988,56 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/c7/9d/bf86eddabf8c6c9cb1ea9a869d6873b46f105a5d292d3a6f7071f5b07935/pytest_asyncio-1.1.0-py3-none-any.whl", hash = "sha256:5fe2d69607b0bd75c656d1211f969cadba035030156745ee09e7d71740e58ecf", size = 15157 },
]
[[package]]
name = "python-bidi"
version = "0.6.6"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/c4/de/1822200711beaadb2f334fa25f59ad9c2627de423c103dde7e81aedbc8e2/python_bidi-0.6.6.tar.gz", hash = "sha256:07db4c7da502593bd6e39c07b3a38733704070de0cbf92a7b7277b7be8867dd9", size = 45102 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/bb/03/b10c5c320fa5f3bc3d7736b2268179cc7f4dca4d054cdf2c932532d6b11a/python_bidi-0.6.6-cp311-cp311-macosx_10_12_x86_64.whl", hash = "sha256:da4949496e563b51f53ff34aad5a9f4c3aaf06f4180cf3bcb42bec649486c8f1", size = 269512 },
{ url = "https://files.pythonhosted.org/packages/91/d8/8f6bd8f4662e8340e1aabb3b9a01fb1de24e8d1ce4f38b160f5cac2524f4/python_bidi-0.6.6-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:c48a755ca8ba3f2b242d6795d4a60e83ca580cc4fa270a3aaa8af05d93b7ba7f", size = 264042 },
{ url = "https://files.pythonhosted.org/packages/51/9f/2c831510ab8afb03b5ec4b15271dc547a2e8643563a7bcc712cd43b29d26/python_bidi-0.6.6-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:76a1cd320993ba3e91a567e97f057a03f2c6b493096b3fff8b5630f51a38e7eb", size = 290963 },
{ url = "https://files.pythonhosted.org/packages/95/45/17a76e7052d4d4bc1549ac2061f1fdebbaa9b7448ce81e774b7f77dc70b2/python_bidi-0.6.6-cp311-cp311-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:e8bf3e396f9ebe8f4f81e92fa4c98c50160d60c58964b89c8ff4ee0c482befaa", size = 298639 },
{ url = "https://files.pythonhosted.org/packages/00/11/fb5857168dcc50a2ebb2a5d8771a64b7fc66c19c9586b6f2a4d8a76db2e8/python_bidi-0.6.6-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:a2a49b506ed21f762ebf332de6de689bc4912e24dcc3b85f120b34e5f01e541a", size = 351898 },
{ url = "https://files.pythonhosted.org/packages/18/e7/d25b3e767e204b9e236e7cb042bf709fd5a985cfede8c990da3bbca862a3/python_bidi-0.6.6-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:3428331e7ce0d58c15b5a57e18a43a12e28f8733086066e6fd75b0ded80e1cae", size = 331117 },
{ url = "https://files.pythonhosted.org/packages/75/50/248decd41096b4954c3887fc7fae864b8e1e90d28d1b4ce5a28c087c3d8d/python_bidi-0.6.6-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:35adfb9fed3e72b9043a5c00b6ab69e4b33d53d2d8f8b9f60d4df700f77bc2c0", size = 292950 },
{ url = "https://files.pythonhosted.org/packages/0b/d8/6ae7827fbba1403882930d4da8cbab28ab6b86b61a381c991074fb5003d1/python_bidi-0.6.6-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:589c5b24a8c4b5e07a1e97654020734bf16ed01a4353911ab663a37aaf1c281d", size = 307909 },
{ url = "https://files.pythonhosted.org/packages/4c/a3/5b369c5da7b08b36907dcce7a78c730370ad6899459282f5e703ec1964c6/python_bidi-0.6.6-cp311-cp311-musllinux_1_2_aarch64.whl", hash = "sha256:994534e47260d712c3b3291a6ab55b46cdbfd78a879ef95d14b27bceebfd4049", size = 465552 },
{ url = "https://files.pythonhosted.org/packages/82/07/7779668967c0f17a107a916ec7891507b7bcdc9c7ee4d2c4b6a80ba1ac5e/python_bidi-0.6.6-cp311-cp311-musllinux_1_2_armv7l.whl", hash = "sha256:00622f54a80826a918b22a2d6d5481bb3f669147e17bac85c81136b6ffbe7c06", size = 557371 },
{ url = "https://files.pythonhosted.org/packages/2d/e5/3154ac009a167bf0811195f12cf5e896c77a29243522b4b0697985881bc4/python_bidi-0.6.6-cp311-cp311-musllinux_1_2_i686.whl", hash = "sha256:965e6f2182e7b9352f2d79221f6c49502a307a9778d7d87d82dc36bb1ffecbab", size = 485458 },
{ url = "https://files.pythonhosted.org/packages/fd/db/88af6f0048d8ec7281b44b5599a3d2afa18fac5dd22eb72526f28f4ea647/python_bidi-0.6.6-cp311-cp311-musllinux_1_2_x86_64.whl", hash = "sha256:53d7d3a550d176df99dd0bb0cc2da16b40634f11c8b9f5715777441d679c0a62", size = 459588 },
{ url = "https://files.pythonhosted.org/packages/bb/d2/77b649c8b32c2b88e2facf5a42fb51dfdcc9e13db411c8bc84831ad64893/python_bidi-0.6.6-cp311-cp311-win32.whl", hash = "sha256:b271cd05cb40f47eb4600de79a8e47f8579d81ce35f5650b39b7860d018c3ece", size = 155683 },
{ url = "https://files.pythonhosted.org/packages/95/41/d4dbc72b96e2eea3aeb9292707459372c8682ef039cd19fcac7e09d513ef/python_bidi-0.6.6-cp311-cp311-win_amd64.whl", hash = "sha256:4ff1eba0ff87e04bd35d7e164203ad6e5ce19f0bac0bdf673134c0b78d919608", size = 160587 },
{ url = "https://files.pythonhosted.org/packages/6f/84/45484b091e89d657b0edbfc4378d94ae39915e1f230cb13614f355ff7f22/python_bidi-0.6.6-cp312-cp312-macosx_10_12_x86_64.whl", hash = "sha256:166060a31c10aa3ffadd52cf10a3c9c2b8d78d844e0f2c5801e2ed511d3ec316", size = 267218 },
{ url = "https://files.pythonhosted.org/packages/b7/17/b314c260366a8fb370c58b98298f903fb2a3c476267efbe792bb8694ac7c/python_bidi-0.6.6-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:8706addd827840c2c3b3a9963060d9b979b43801cc9be982efa9644facd3ed26", size = 262129 },
{ url = "https://files.pythonhosted.org/packages/27/b6/8212d0f83aaa361ab33f98c156a453ea5cfb9ac40fab06eef9a156ba4dfa/python_bidi-0.6.6-cp312-cp312-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:69c02316a4f72a168ea6f66b90d845086e2f2d2de6b08eb32c576db36582177c", size = 290811 },
{ url = "https://files.pythonhosted.org/packages/cd/05/cd503307cd478d18f09b301d20e38ef4107526e65e9cbb9ce489cc2ddbf3/python_bidi-0.6.6-cp312-cp312-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:a525bcb77b8edbfdcf8b199dbed24556e6d1436af8f5fa392f6cdc93ed79b4af", size = 298175 },
{ url = "https://files.pythonhosted.org/packages/e0/0c/bd7bbd70bd330f282c534f03235a9b8da56262ea97a353d8fe9e367d0d7c/python_bidi-0.6.6-cp312-cp312-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bb186c8da4bdc953893504bba93f41d5b412fd767ba5661ff606f22950ec609", size = 351470 },
{ url = "https://files.pythonhosted.org/packages/5e/ab/05a1864d5317e69e022930457f198c2d0344fd281117499ad3fedec5b77c/python_bidi-0.6.6-cp312-cp312-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:25fa21b46dc80ac7099d2dee424b634eb1f76b2308d518e505a626c55cdbf7b1", size = 329468 },
{ url = "https://files.pythonhosted.org/packages/07/7c/094bbcb97089ac79f112afa762051129c55d52a7f58923203dfc62f75feb/python_bidi-0.6.6-cp312-cp312-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b31f5562839e7ecea881ba337f9d39716e2e0e6b3ba395e824620ee5060050ff", size = 292102 },
{ url = "https://files.pythonhosted.org/packages/99/6b/5e2e6c2d76e7669b9dd68227e8e70cf72a6566ffdf414b31b64098406030/python_bidi-0.6.6-cp312-cp312-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:fb750d3d5ac028e8afd62d000928a2110dbca012fee68b1a325a38caa03dc50b", size = 307282 },
{ url = "https://files.pythonhosted.org/packages/5e/da/6cbe04f605100978755fc5f4d8a8209789b167568e1e08e753d1a88edcc5/python_bidi-0.6.6-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:8b5f648ee8e9f4ac0400f71e671934b39837d7031496e0edde867a303344d758", size = 464487 },
{ url = "https://files.pythonhosted.org/packages/d5/83/d15a0c944b819b8f101418b973772c42fb818c325c82236978db71b1ed7e/python_bidi-0.6.6-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c4c0255940e6ff98fb05f9d5de3ffcaab7b60d821d4ca072b50c4f871b036562", size = 556449 },
{ url = "https://files.pythonhosted.org/packages/0f/9a/80f0551adcbc9dd02304a4e4ae46113bb1f6f5172831ad86b860814ff498/python_bidi-0.6.6-cp312-cp312-musllinux_1_2_i686.whl", hash = "sha256:e7e36601edda15e67527560b1c00108b0d27831260b6b251cf7c6dd110645c03", size = 484368 },
{ url = "https://files.pythonhosted.org/packages/9e/05/4a4074530e54a3e384535d185c77fe9bf0321b207bfcb3a9c1676ee9976f/python_bidi-0.6.6-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:07c9f000671b187319bacebb9e98d8b75005ccd16aa41b9d4411e66813c467bb", size = 458846 },
{ url = "https://files.pythonhosted.org/packages/9f/10/91d112d152b273e54ca7b7d476faaf27e9a350ef85b4fcc281bdd577d13b/python_bidi-0.6.6-cp312-cp312-win32.whl", hash = "sha256:57c0ca449a116c4f804422111b3345281c4e69c733c4556fa216644ec9907078", size = 155236 },
{ url = "https://files.pythonhosted.org/packages/30/da/e1537900bc8a838b0637124cf8f7ef36ce87b5cdc41fb4c26752a4b9c25a/python_bidi-0.6.6-cp312-cp312-win_amd64.whl", hash = "sha256:f60afe457a37bd908fdc7b520c07620b1a7cc006e08b6e3e70474025b4f5e5c7", size = 160251 },
{ url = "https://files.pythonhosted.org/packages/a5/b1/b24cb64b441dadd911b39d8b86a91606481f84be1b3f01ffca3f9847a4f1/python_bidi-0.6.6-cp313-cp313-macosx_10_12_x86_64.whl", hash = "sha256:61cf12f6b7d0b9bb37838a5f045e6acbd91e838b57f0369c55319bb3969ffa4d", size = 266728 },
{ url = "https://files.pythonhosted.org/packages/0c/19/d4d449dcdc5eb72b6ffb97b34db710ea307682cae065fbe83a0e42fee00a/python_bidi-0.6.6-cp313-cp313-macosx_11_0_arm64.whl", hash = "sha256:33bd0ba5eedf18315a1475ac0f215b5134e48011b7320aedc2fb97df31d4e5bf", size = 261475 },
{ url = "https://files.pythonhosted.org/packages/0a/87/4ecaecf7cc17443129b0f3a967b6f455c0d773b58d68b93c5949a91a0b8b/python_bidi-0.6.6-cp313-cp313-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5c9f798dd49b24bb1a9d90f065ef25c7bffa94c04c554f1fc02d0aea0a9b10b0", size = 290153 },
{ url = "https://files.pythonhosted.org/packages/42/6e/4b57a3dba455f42fa82a9b5caf3d35535bd6eb644a37a031ac1d5e8b6a3e/python_bidi-0.6.6-cp313-cp313-manylinux_2_17_armv7l.manylinux2014_armv7l.whl", hash = "sha256:43a0409570c618d93706dc875b1d33b4adfe67144f6f2ebeb32d85d8bbdb85ed", size = 297567 },
{ url = "https://files.pythonhosted.org/packages/39/39/dc9ce9b15888b6391206d77fc36fd23447fb5313aee1fa1031432b2a4072/python_bidi-0.6.6-cp313-cp313-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ada1aecd32773c61b16f7c9f74d9ec1b57ea433e2083e08ca387c5cd4b0ceaed", size = 351186 },
{ url = "https://files.pythonhosted.org/packages/9e/66/cc9795903be4ce781b89fa4fe0e493369d58cd0fc0dda9287ab227d410d3/python_bidi-0.6.6-cp313-cp313-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:125a815f2b20313a2f6d331aa84abdd07de7d270985b056e6729390a4cda90df", size = 329159 },
{ url = "https://files.pythonhosted.org/packages/ca/40/071dc08645daa09cb8c008db888141998a895d2d1ed03ba780971b595297/python_bidi-0.6.6-cp313-cp313-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:183fee39bd2de787f632376bd5ba0d5f1daf6a09d3ebfaa211df25d62223e531", size = 291743 },
{ url = "https://files.pythonhosted.org/packages/17/5a/5f60915a9f73f48df27bf262a210fa66ea8ffe5fd0072c67288e55e3304e/python_bidi-0.6.6-cp313-cp313-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:c4e08753d32d633f5ecb5eb02624272eeffaa6d5c6f4f9ddf012637bcaabfc0a", size = 306568 },
{ url = "https://files.pythonhosted.org/packages/9e/01/03341516d895ee937036d38ab4f9987857b1066f7c267b99963ee056eb9e/python_bidi-0.6.6-cp313-cp313-musllinux_1_2_aarch64.whl", hash = "sha256:d1dcd7a82ae00b86821fce627e310791f56da90924f15877cfda844e340679de", size = 463890 },
{ url = "https://files.pythonhosted.org/packages/4f/a8/36bb9553e00d33acee2d2d447b60bccb0aad5c1d589cd364ddd95d9b876b/python_bidi-0.6.6-cp313-cp313-musllinux_1_2_armv7l.whl", hash = "sha256:5506ba56380140b3cb3504029de014d21eb8874c5e081d88495f8775f6ed90bc", size = 555980 },
{ url = "https://files.pythonhosted.org/packages/46/05/88aa85522472afda215a6b436eaa0aac6bbe9e29a64db0f99f61d1aa6527/python_bidi-0.6.6-cp313-cp313-musllinux_1_2_i686.whl", hash = "sha256:207b0a7082ec38045910d37700a0dd73c10d4ffccb22a4fd0391d7e9ce241672", size = 483881 },
{ url = "https://files.pythonhosted.org/packages/48/7e/f813de1a92e10c302649134ea3a8c6429f9c2e5dd161e82e88f08b4c7565/python_bidi-0.6.6-cp313-cp313-musllinux_1_2_x86_64.whl", hash = "sha256:686642a52acdeffb1d9a593a284d07b175c63877c596fa3ccceeb2649ced1dd8", size = 458296 },
{ url = "https://files.pythonhosted.org/packages/e9/ea/a775bec616ec01d9a0df7d5a6e1b3729285dd5e7f1fdb0dfce2e0604c6a3/python_bidi-0.6.6-cp313-cp313-win32.whl", hash = "sha256:485f2ee109e7aa73efc165b90a6d90da52546801413540c08b7133fe729d5e0a", size = 155033 },
{ url = "https://files.pythonhosted.org/packages/74/79/3323f08c98b9a5b726303b68babdd26cf4fe710709b7c61c96e6bb4f3d10/python_bidi-0.6.6-cp313-cp313-win_amd64.whl", hash = "sha256:63f7a9eaec31078e7611ab958b6e18e796c05b63ca50c1f7298311dc1e15ac3e", size = 159973 },
]
[[package]]
name = "python-dateutil"
version = "2.9.0.post0"
@@ -3399,6 +3585,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ce/fd/901cfa59aaa5b30a99e16876f11abe38b59a1a2c51ffb3d7142bb6089069/starlette-0.47.3-py3-none-any.whl", hash = "sha256:89c0778ca62a76b826101e7c709e70680a1699ca7da6b44d38eb0a7e61fe4b51", size = 72991 },
]
[[package]]
name = "svglib"
version = "1.5.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "cssselect2" },
{ name = "lxml" },
{ name = "reportlab" },
{ name = "tinycss2" },
]
sdist = { url = "https://files.pythonhosted.org/packages/56/5b/53ca0fd447f73423c7dc59d34e523530ef434481a3d18808ff7537ad33ec/svglib-1.5.1.tar.gz", hash = "sha256:3ae765d3a9409ee60c0fb4d24c2deb6a80617aa927054f5bcd7fc98f0695e587", size = 913900 }
[[package]]
name = "sympy"
version = "1.14.0"
@@ -3467,6 +3665,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/50/79/bcf350609f3a10f09fe4fc207f132085e497fdd3612f3925ab24d86a0ca0/tiktoken-0.11.0-cp313-cp313-win_amd64.whl", hash = "sha256:2177ffda31dec4023356a441793fed82f7af5291120751dee4d696414f54db0c", size = 883901 },
]
[[package]]
name = "tinycss2"
version = "1.4.0"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "webencodings" },
]
sdist = { url = "https://files.pythonhosted.org/packages/7a/fd/7a5ee21fd08ff70d3d33a5781c255cbe779659bd03278feb98b19ee550f4/tinycss2-1.4.0.tar.gz", hash = "sha256:10c0972f6fc0fbee87c3edb76549357415e94548c1ae10ebccdea16fb404a9b7", size = 87085 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/e6/34/ebdc18bae6aa14fbee1a08b63c015c72b64868ff7dae68808ab500c492e2/tinycss2-1.4.0-py3-none-any.whl", hash = "sha256:3a49cf47b7675da0b15d0c6e1df8df4ebd96e9394bb905a5775adb0d884c5289", size = 26610 },
]
[[package]]
name = "tokenizers"
version = "0.22.0"
@@ -3592,6 +3802,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/5c/23/c7abc0ca0a1526a0774eca151daeb8de62ec457e77262b66b359c3c7679e/tzdata-2025.2-py2.py3-none-any.whl", hash = "sha256:1a403fada01ff9221ca8044d701868fa132215d84beb92242d9acd2147f667a8", size = 347839 },
]
[[package]]
name = "tzlocal"
version = "5.3.1"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "tzdata", marker = "sys_platform == 'win32'" },
]
sdist = { url = "https://files.pythonhosted.org/packages/8b/2e/c14812d3d4d9cd1773c6be938f89e5735a1f11a9f184ac3639b93cef35d5/tzlocal-5.3.1.tar.gz", hash = "sha256:cceffc7edecefea1f595541dbd6e990cb1ea3d19bf01b2809f362a03dd7921fd", size = 30761 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/c2/14/e2a54fabd4f08cd7af1c07030603c3356b74da07f7cc056e600436edfa17/tzlocal-5.3.1-py3-none-any.whl", hash = "sha256:eb1a66c3ef5847adf7a834f1be0800581b683b5608e74f86ecbcef8ab91bb85d", size = 18026 },
]
[[package]]
name = "ujson"
version = "5.11.0"
@@ -3661,6 +3883,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/52/5b/8c5e33228f7f83f05719964db59f3f9f276d272dc43752fa3bbf0df53e7b/ujson-5.11.0-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:416389ec19ef5f2013592f791486bef712ebce0cd59299bf9df1ba40bb2f6e04", size = 43835 },
]
[[package]]
name = "uritools"
version = "5.0.0"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/36/b1/e482d43db3209663b82a59e37cf31f641254180190667c6b0bf18a297de8/uritools-5.0.0.tar.gz", hash = "sha256:68180cad154062bd5b5d9ffcdd464f8de6934414b25462ae807b00b8df9345de", size = 22730 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/8c/74/0987d204b5fbf83861affa6b36a20da22cb3fe708583b955c99ab834bd5a/uritools-5.0.0-py3-none-any.whl", hash = "sha256:cead3a49ba8fbca3f91857343849d506d8639718f4a2e51b62e87393b493bd6f", size = 10432 },
]
[[package]]
name = "urllib3"
version = "2.5.0"
@@ -3822,6 +4053,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/fd/84/fd2ba7aafacbad3c4201d395674fc6348826569da3c0937e75505ead3528/wcwidth-0.2.13-py2.py3-none-any.whl", hash = "sha256:3da69048e4540d84af32131829ff948f1e022c1c6bdb8d6102117aac784f6859", size = 34166 },
]
[[package]]
name = "webencodings"
version = "0.5.1"
source = { registry = "https://pypi.org/simple" }
sdist = { url = "https://files.pythonhosted.org/packages/0b/02/ae6ceac1baeda530866a85075641cec12989bd8d31af6d5ab4a3e8c92f47/webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923", size = 9721 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/f4/24/2a3e3df732393fed8b3ebf2ec078f05546de641fe1b667ee316ec1dcf3b7/webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78", size = 11774 },
]
[[package]]
name = "websockets"
version = "15.0.1"
@@ -3864,6 +4104,26 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/fa/a8/5b41e0da817d64113292ab1f8247140aac61cbf6cfd085d6a0fa77f4984f/websockets-15.0.1-py3-none-any.whl", hash = "sha256:f7a866fbc1e97b5c617ee4116daaa09b722101d4a3c170c787450ba409f9736f", size = 169743 },
]
[[package]]
name = "xhtml2pdf"
version = "0.2.17"
source = { registry = "https://pypi.org/simple" }
dependencies = [
{ name = "arabic-reshaper" },
{ name = "html5lib" },
{ name = "pillow" },
{ name = "pyhanko" },
{ name = "pyhanko-certvalidator" },
{ name = "pypdf" },
{ name = "python-bidi" },
{ name = "reportlab" },
{ name = "svglib" },
]
sdist = { url = "https://files.pythonhosted.org/packages/da/9a/3b29831d8617ecbcf0b0aaa2b3e1b24f3fd1bbd204678ae86e9fee2f4239/xhtml2pdf-0.2.17.tar.gz", hash = "sha256:09ddbc31aa0e38a16f2f3cb73be89af5f7c968c17a564afdd685d280e39c526d", size = 139727 }
wheels = [
{ url = "https://files.pythonhosted.org/packages/93/ca/d53764f0534ff857239595f090f4cb83b599d226cc326c7de5eb3d802715/xhtml2pdf-0.2.17-py3-none-any.whl", hash = "sha256:61a7ecac829fed518f7dbcb916e9d56bea6e521e02e54644b3d0ca33f0658315", size = 125349 },
]
[[package]]
name = "xlrd"
version = "2.0.2"