mirror of
https://github.com/camel-ai/owl.git
synced 2025-12-26 10:07:51 +08:00
parth
This commit is contained in:
parent
19b2ba36ec
commit
a4d180de49
@ -0,0 +1,27 @@
|
||||
# API KEYS
|
||||
# ===========================================
|
||||
# Choose ONE of the following API providers:
|
||||
|
||||
# Option 1: OpenAI API (recommended for best results)
|
||||
# Get API key from: https://platform.openai.com/api-keys
|
||||
OPENAI_API_KEY=""
|
||||
|
||||
# Option 2: OpenRouter API (for access to Gemini and other models)
|
||||
# Get API key from: https://openrouter.ai/keys
|
||||
OPENROUTER_API_KEY=""
|
||||
|
||||
# SEARCH CAPABILITIES
|
||||
# ===========================================
|
||||
# Optional but recommended for enhanced research
|
||||
|
||||
# Google Search API (optional but recommended)
|
||||
# Get from: https://programmablesearchengine.google.com/
|
||||
GOOGLE_API_KEY=""
|
||||
SEARCH_ENGINE_ID=""
|
||||
|
||||
# ADDITIONAL SETTINGS
|
||||
# ===========================================
|
||||
# Advanced settings (optional)
|
||||
|
||||
# Logging level: DEBUG, INFO, WARNING, ERROR
|
||||
LOG_LEVEL="INFO"
|
||||
176
community_usecase/OWL Interview Preparation Assistant/.gitignore
vendored
Normal file
176
community_usecase/OWL Interview Preparation Assistant/.gitignore
vendored
Normal file
@ -0,0 +1,176 @@
|
||||
# Byte-compiled / optimized / DLL files
|
||||
__pycache__/
|
||||
*.py[cod]
|
||||
*$py.class
|
||||
|
||||
# C extensions
|
||||
*.so
|
||||
|
||||
# Distribution / packaging
|
||||
.Python
|
||||
build/
|
||||
develop-eggs/
|
||||
dist/
|
||||
downloads/
|
||||
eggs/
|
||||
.eggs/
|
||||
lib/
|
||||
lib64/
|
||||
parts/
|
||||
sdist/
|
||||
var/
|
||||
wheels/
|
||||
share/python-wheels/
|
||||
*.egg-info/
|
||||
.installed.cfg
|
||||
*.egg
|
||||
MANIFEST
|
||||
|
||||
# PyInstaller
|
||||
# Usually these files are written by a python script from a template
|
||||
# before PyInstaller builds the exe, so as to inject date/other infos into it.
|
||||
*.manifest
|
||||
*.spec
|
||||
|
||||
# Installer logs
|
||||
pip-log.txt
|
||||
pip-delete-this-directory.txt
|
||||
|
||||
# Unit test / coverage reports
|
||||
htmlcov/
|
||||
.tox/
|
||||
.nox/
|
||||
.coverage
|
||||
.coverage.*
|
||||
.cache
|
||||
nosetests.xml
|
||||
coverage.xml
|
||||
*.cover
|
||||
*.py,cover
|
||||
.hypothesis/
|
||||
.pytest_cache/
|
||||
cover/
|
||||
|
||||
# Translations
|
||||
*.mo
|
||||
*.pot
|
||||
|
||||
# Django stuff:
|
||||
*.log
|
||||
local_settings.py
|
||||
db.sqlite3
|
||||
db.sqlite3-journal
|
||||
|
||||
# Flask stuff:
|
||||
instance/
|
||||
.webassets-cache
|
||||
|
||||
# Scrapy stuff:
|
||||
.scrapy
|
||||
|
||||
# Sphinx documentation
|
||||
docs/_build/
|
||||
|
||||
# PyBuilder
|
||||
.pybuilder/
|
||||
target/
|
||||
|
||||
# Jupyter Notebook
|
||||
.ipynb_checkpoints
|
||||
|
||||
# IPython
|
||||
profile_default/
|
||||
ipython_config.py
|
||||
|
||||
# pyenv
|
||||
# For a library or package, you might want to ignore these files since the code is
|
||||
# intended to run in multiple environments; otherwise, check them in:
|
||||
# .python-version
|
||||
|
||||
# pipenv
|
||||
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
|
||||
# However, in case of collaboration, if having platform-specific dependencies or dependencies
|
||||
# having no cross-platform support, pipenv may install dependencies that don't work, or not
|
||||
# install all needed dependencies.
|
||||
#Pipfile.lock
|
||||
|
||||
# UV
|
||||
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
#uv.lock
|
||||
|
||||
# poetry
|
||||
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
|
||||
# This is especially recommended for binary packages to ensure reproducibility, and is more
|
||||
# commonly ignored for libraries.
|
||||
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
|
||||
#poetry.lock
|
||||
|
||||
# pdm
|
||||
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
|
||||
#pdm.lock
|
||||
# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
|
||||
# in version control.
|
||||
# https://pdm.fming.dev/latest/usage/project/#working-with-version-control
|
||||
.pdm.toml
|
||||
.pdm-python
|
||||
.pdm-build/
|
||||
|
||||
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
|
||||
__pypackages__/
|
||||
|
||||
# Celery stuff
|
||||
celerybeat-schedule
|
||||
celerybeat.pid
|
||||
|
||||
# SageMath parsed files
|
||||
*.sage.py
|
||||
|
||||
# Environments
|
||||
.env
|
||||
.venv
|
||||
env/
|
||||
venv/
|
||||
ENV/
|
||||
env.bak/
|
||||
venv.bak/
|
||||
|
||||
# Spyder project settings
|
||||
.spyderproject
|
||||
.spyproject
|
||||
|
||||
# Rope project settings
|
||||
.ropeproject
|
||||
|
||||
# mkdocs documentation
|
||||
/site
|
||||
|
||||
# mypy
|
||||
.mypy_cache/
|
||||
.dmypy.json
|
||||
dmypy.json
|
||||
|
||||
# Pyre type checker
|
||||
.pyre/
|
||||
|
||||
# pytype static type analyzer
|
||||
.pytype/
|
||||
|
||||
# Cython debug symbols
|
||||
cython_debug/
|
||||
|
||||
# PyCharm
|
||||
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
|
||||
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
|
||||
# and can be added to the global gitignore or merged into this file. For a more nuclear
|
||||
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
|
||||
#.idea/
|
||||
|
||||
# Ruff stuff:
|
||||
.ruff_cache/
|
||||
|
||||
# PyPI configuration file
|
||||
.pypirc
|
||||
|
||||
.directory
|
||||
Binary file not shown.
@ -0,0 +1,98 @@
|
||||
# 🦉 Interview Preparation Assistant: AI-Powered Interview Success
|
||||
|
||||
## Project Overview
|
||||
|
||||
The Interview Preparation Assistant is an advanced multi-agent AI system built on the OWL framework that revolutionizes how job seekers prepare for interviews. By leveraging the power of collaborative AI agents, it delivers personalized, comprehensive, and actionable interview preparation materials tailored to specific job roles and companies.
|
||||
|
||||
## The Problem We're Solving
|
||||
|
||||
Job interviews are critical gateways to career opportunities, yet preparation is often fragmented, generic, and time-consuming:
|
||||
|
||||
- **Information Overload**: Job seekers must sift through countless resources to find relevant information
|
||||
- **Limited Personalization**: Generic interview guides fail to address specific company cultures and role requirements
|
||||
- **Time Constraints**: Comprehensive research and preparation can take weeks of dedicated effort
|
||||
- **Technical Complexity**: For technical roles, preparing appropriate code examples and solutions is challenging
|
||||
- **Anxiety and Uncertainty**: Many candidates feel underprepared, increasing interview anxiety
|
||||
|
||||
## My Solution
|
||||
|
||||
The Interview Preparation Assistant transforms this experience by deploying multiple specialized AI agents working in concert to create a complete interview preparation package:
|
||||
|
||||
### 1. Company Research Agent
|
||||
Performs deep, real-time research on target companies by:
|
||||
- Analyzing company websites, news articles, and social media
|
||||
- Investigating company culture, values, and work environment
|
||||
- Examining technical stacks, product offerings, and industry positioning
|
||||
- Reviewing known interview processes and expectations
|
||||
|
||||
### 2. Question Generation Agent
|
||||
Creates tailored interview questions based on:
|
||||
- The specific job role and required skills
|
||||
- Company-specific technologies and methodologies
|
||||
- Both technical and behavioral aspects of the interview
|
||||
- Current industry trends and best practices
|
||||
|
||||
### 3. Preparation Plan Agent
|
||||
Develops structured preparation plans that include:
|
||||
- Day-by-day preparation schedules
|
||||
- Prioritized study topics and resources
|
||||
- Mock interview scenarios with sample answers
|
||||
- Technical practice problems with detailed solutions
|
||||
|
||||
## Key Differentiators
|
||||
|
||||
What makes my solution unique:
|
||||
|
||||
- **Multi-Agent Collaboration**: Multiple specialized AI agents working together create more comprehensive and accurate results than a single AI assistant
|
||||
- **Real-Time Research**: Up-to-date information gathered from the web ensures relevance
|
||||
- **Deep Personalization**: Materials tailored to specific companies and roles rather than generic advice
|
||||
- **Technical Depth**: Detailed code examples and technical explanations for engineering roles
|
||||
- **Structured Output**: Clear, organized preparation materials ready for immediate use
|
||||
- **Conversation Transparency**: Users can observe the agents' thought processes, building trust and understanding
|
||||
|
||||
## Value Proposition
|
||||
|
||||
The Interview Preparation Assistant delivers significant value to users by:
|
||||
|
||||
- **Saving Time**: Reduces weeks of research and preparation to minutes
|
||||
- **Increasing Confidence**: Comprehensive preparation materials reduce anxiety and build confidence
|
||||
- **Improving Performance**: Better-prepared candidates perform stronger in interviews
|
||||
- **Accelerating Career Growth**: Higher success rates in job interviews lead to better career opportunities
|
||||
- **Democratizing Access**: Makes high-quality interview preparation accessible to everyone, not just those with professional networks or coaching
|
||||
|
||||
## Use Case Examples
|
||||
|
||||
### Technical Role Preparation
|
||||
A software engineer applying to Google receives:
|
||||
- Detailed analysis of Google's engineering culture and interview process
|
||||
- Coding questions focused on algorithms, data structures, and system design
|
||||
- Google-specific behavioral questions emphasizing innovation and collaboration
|
||||
- A 14-day preparation plan with specific practice exercises
|
||||
|
||||
### Business Role Preparation
|
||||
A marketing manager applying to Apple receives:
|
||||
- Insights into Apple's marketing philosophy and brand positioning
|
||||
- Case study questions focused on product launches and customer experience
|
||||
- Behavioral questions targeting creativity and strategic thinking
|
||||
- A preparation plan emphasizing Apple's unique approach to marketing
|
||||
|
||||
## Technical Implementation
|
||||
|
||||
The system is built using:
|
||||
- **OWL Multi-Agent Framework**: Enabling coordinated collaboration between specialized AI agents
|
||||
- **Dynamic Research Tools**: Real-time web search and content processing
|
||||
- **Streamlit Interface**: User-friendly web application for easy interaction
|
||||
- **Advanced LLM Models**: Utilizing state-of-the-art language models (OpenAI/Gemini)
|
||||
|
||||
## Impact and Future Development
|
||||
|
||||
The Interview Preparation Assistant demonstrates the transformative potential of multi-agent AI systems for personalized knowledge work. Future development paths include:
|
||||
|
||||
- **Interview Simulation**: Interactive mock interviews with feedback
|
||||
- **Performance Analytics**: Tracking preparation progress and identifying areas for improvement
|
||||
- **Specialized Modules**: Domain-specific preparation for fields like healthcare, finance, etc.
|
||||
- **Mentor Matching**: Connecting candidates with industry professionals based on preparation insights
|
||||
|
||||
---
|
||||
|
||||
This project showcases how OWL's collaborative AI framework can transform complex, knowledge-intensive tasks that traditionally required significant human effort into accessible, high-quality services available on demand.
|
||||
238
community_usecase/OWL Interview Preparation Assistant/README.md
Normal file
238
community_usecase/OWL Interview Preparation Assistant/README.md
Normal file
@ -0,0 +1,238 @@
|
||||
# 🦉 Interview Preparation Assistant
|
||||
|
||||
An intelligent multi-agent interview preparation system powered by the OWL framework that helps you prepare for job interviews with comprehensive research, tailored questions, and detailed preparation plans.
|
||||
|
||||
![Interview Preparation Assistant]()
|
||||
|
||||
## ✨ Features
|
||||
|
||||
- **🔍 Company Research**: Automatically researches companies using real-time web data
|
||||
- **❓ Interview Question Generation**: Creates tailored interview questions specific to your job role and target company
|
||||
- **📋 Preparation Plans**: Builds comprehensive step-by-step interview preparation plans
|
||||
- **🧠 AI-Powered Agents**: Leverages multiple AI agents to work together on your interview preparation
|
||||
- **💻 Code Examples**: Provides code examples for technical roles with explanations
|
||||
- **🔄 Real-time Progress**: Shows conversation process between AI agents as they prepare your materials
|
||||
|
||||
## 📋 Table of Contents
|
||||
|
||||
- [Requirements](#-requirements)
|
||||
- [Installation](#-installation)
|
||||
- [Quick Start](#-quick-start)
|
||||
- [Usage Guide](#-usage-guide)
|
||||
- [Configuration](#-configuration)
|
||||
- [Troubleshooting](#-troubleshooting)
|
||||
- [Project Structure](#-project-structure)
|
||||
|
||||
## 🛠 Requirements
|
||||
|
||||
- Python 3.10+ (tested on Python 3.10)
|
||||
- Access to one of the following AI models:
|
||||
- OpenAI API (GPT-4)
|
||||
- OpenRouter API (Gemini models)
|
||||
- Internet connection for web search and company research
|
||||
- Minimum 8GB RAM
|
||||
|
||||
## 🚀 Installation
|
||||
|
||||
### 1. Clone the OWL Repository
|
||||
|
||||
First, clone the OWL repository, which this project depends on:
|
||||
|
||||
```bash
|
||||
git clone https://github.com/camel-ai/owl.git
|
||||
cd owl
|
||||
```
|
||||
|
||||
### 2. Create a Virtual Environment
|
||||
|
||||
```bash
|
||||
# Create a conda environment (recommended)
|
||||
conda create -n interview_assistant python=3.10
|
||||
conda activate interview_assistant
|
||||
|
||||
# OR using venv
|
||||
python -m venv interview_env
|
||||
source interview_env/bin/activate # On Windows: interview_env\Scripts\activate
|
||||
```
|
||||
|
||||
### 3. Install OWL and Dependencies
|
||||
|
||||
```bash
|
||||
# Install OWL
|
||||
pip install -e .
|
||||
|
||||
# Install additional dependencies
|
||||
pip install streamlit numpy pandas opencv-python
|
||||
```
|
||||
|
||||
### 4. Configure API Keys
|
||||
|
||||
Create a `.env` file in the project directory with your API keys:
|
||||
|
||||
```bash
|
||||
# Navigate to the Interview Preparation Assistant directory
|
||||
cd community_usecase/OWL\ Interview\ Preparation\ Assistant/
|
||||
|
||||
# Create .env file
|
||||
touch .env
|
||||
```
|
||||
|
||||
Add your API keys to the `.env` file:
|
||||
|
||||
```
|
||||
# OpenAI API (recommended for best results)
|
||||
OPENAI_API_KEY=your_openai_api_key_here
|
||||
|
||||
# OR OpenRouter API (for access to Gemini models)
|
||||
OPENROUTER_API_KEY=your_openrouter_api_key_here
|
||||
|
||||
# Optional: Google Search API for enhanced research (optional)
|
||||
GOOGLE_API_KEY=your_google_api_key_here
|
||||
SEARCH_ENGINE_ID=your_google_search_engine_id_here
|
||||
```
|
||||
|
||||
## ⚡ Quick Start
|
||||
|
||||
The fastest way to get started is to use the Streamlit web interface:
|
||||
|
||||
```bash
|
||||
# Navigate to the project directory
|
||||
cd community_usecase/OWL\ Interview\ Preparation\ Assistant/
|
||||
|
||||
# Start the web application
|
||||
streamlit run app.py
|
||||
```
|
||||
|
||||
This will open a web browser window with the Interview Preparation Assistant interface where you can:
|
||||
|
||||
1. Enter your target job role (e.g., "Machine Learning Engineer")
|
||||
2. Enter your target company name (e.g., "Google")
|
||||
3. Generate interview preparation materials
|
||||
|
||||
## 📚 Usage Guide
|
||||
|
||||
### Web Interface
|
||||
|
||||
The web interface provides three main functions:
|
||||
|
||||
#### 1. Company Research
|
||||
|
||||
Click on "Research Company" to generate a comprehensive report about your target company including:
|
||||
- Company background and culture
|
||||
- Technical stack and technologies used
|
||||
- Interview process and expectations
|
||||
- Key products and services
|
||||
|
||||
|
||||
#### 2. Interview Questions
|
||||
|
||||
Click on "Generate Questions" to create tailored interview questions for your role and company:
|
||||
- Technical questions with code examples
|
||||
- Behavioral questions specific to the company culture
|
||||
- Role-specific questions to showcase your expertise
|
||||
- Sample answers and solution approaches
|
||||
|
||||
|
||||
#### 3. Preparation Plan
|
||||
|
||||
Click on "Create Preparation Plan" to receive a detailed day-by-day preparation guide:
|
||||
- Structured preparation timeline
|
||||
- Technical topics to review
|
||||
- Practice exercises and code challenges
|
||||
- Research and preparation tasks
|
||||
- Interview day tips
|
||||
|
||||
|
||||
### Command Line Usage
|
||||
|
||||
You can also run specific functions from the command line:
|
||||
|
||||
```bash
|
||||
# Run company research
|
||||
python -c "from main import research_company; result = research_company('Google', detailed=True); print(result['answer'])"
|
||||
|
||||
# Generate interview questions
|
||||
python -c "from main import generate_interview_questions; result = generate_interview_questions('Machine Learning Engineer', 'Google'); print(result['answer'])"
|
||||
|
||||
# Create preparation plan
|
||||
python -c "from main import create_interview_prep_plan; result = create_interview_prep_plan('Machine Learning Engineer', 'Google'); print(result['answer'])"
|
||||
```
|
||||
|
||||
### Log Monitoring
|
||||
|
||||
You can view the logs in real-time in the "System Logs" tab of the web interface to monitor:
|
||||
- AI agent conversations
|
||||
- Progress of each request
|
||||
- Any errors or issues that occur
|
||||
|
||||
## ⚙️ Configuration
|
||||
|
||||
### Customizing Parameters
|
||||
|
||||
You can adjust the following parameters in `main.py`:
|
||||
|
||||
1. **Round Limit**: Change the conversation round limit by modifying the `round_limit` parameter in function calls (default: 5)
|
||||
|
||||
2. **Model Selection**: Edit the model configuration in `construct_interview_assistant()` to use different models
|
||||
|
||||
3. **Output Directory**: Change `INTERVIEW_PREP_DIR` to customize where results are stored
|
||||
|
||||
### Environment Variables
|
||||
|
||||
In addition to API keys, you can customize behavior with these environment variables:
|
||||
|
||||
- `LOG_LEVEL`: Set to `DEBUG`, `INFO`, `WARNING`, or `ERROR` to control logging verbosity
|
||||
|
||||
## 🔧 Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **API Key Errors**
|
||||
- Ensure your API keys are correctly set in the `.env` file
|
||||
- Check that you're using the correct format without quotes or extra spaces
|
||||
|
||||
2. **Model Errors**
|
||||
- If using OpenRouter, ensure the model specified is available on your account
|
||||
- Verify you have sufficient API credits for your requests
|
||||
|
||||
3. **Round Limit Not Working**
|
||||
- The system enforces a strict limit of 5 conversation rounds to prevent excessive token usage
|
||||
- You can adjust this in the code if needed, but may encounter higher API costs
|
||||
|
||||
4. **Memory Errors**
|
||||
- Processing large contexts can require significant memory
|
||||
- Try using a machine with more RAM or reducing model context sizes
|
||||
|
||||
### Getting Help
|
||||
|
||||
If you encounter issues not covered here:
|
||||
|
||||
1. Check the logs in the "System Logs" tab of the web interface
|
||||
2. Examine the console output for error messages
|
||||
3. File an issue on the GitHub repository
|
||||
|
||||
## 📂 Project Structure
|
||||
|
||||
```
|
||||
community_usecase/OWL Interview Preparation Assistant/
|
||||
├── app.py # Streamlit web interface
|
||||
├── main.py # Core functionality and API connections
|
||||
├── config/
|
||||
│ └── prompts.py # Prompt templates for different tasks
|
||||
├── interview_prep/ # Generated interview preparation materials
|
||||
├── logging_utils.py # Logging utilities
|
||||
└── README.md # This documentation
|
||||
```
|
||||
|
||||
## 📝 License
|
||||
|
||||
This project is built on top of the CAMEL-AI OWL framework, which is licensed under the Apache License 2.0.
|
||||
|
||||
## 🙏 Acknowledgements
|
||||
|
||||
- This project is built on the [CAMEL-AI OWL framework](https://github.com/camel-ai/owl)
|
||||
- Special thanks to the contributors of CAMEL-AI for making multi-agent AI systems accessible
|
||||
|
||||
---
|
||||
|
||||
Made with ❤️ for job seekers everywhere.
|
||||
Binary file not shown.
|
After Width: | Height: | Size: 187 KiB |
460
community_usecase/OWL Interview Preparation Assistant/app.py
Normal file
460
community_usecase/OWL Interview Preparation Assistant/app.py
Normal file
@ -0,0 +1,460 @@
|
||||
# app.py
# Streamlit entry point for the Interview Preparation Assistant (OWL-based).

import os
import streamlit as st
import logging
import queue
import time
import sys

# Add parent directory to path for OWL imports
sys.path.append('../')

# Core agent entry points live in the sibling main.py. If they cannot be
# imported (e.g. OWL is not installed), surface the error in the UI and halt
# the app instead of failing later with an opaque NameError.
try:
    from main import research_company, generate_interview_questions, create_interview_prep_plan
except ImportError as e:
    st.error(f"Error importing functions: {e}")
    st.stop()

# Setup logging with queue to capture logs for display.
# Records are pushed in by StreamlitLogHandler and drained by get_logs()
# for the "System Logs" tab.
log_queue = queue.Queue()
|
||||
|
||||
class StreamlitLogHandler(logging.Handler):
    """Logging handler that pushes formatted records onto a queue.

    The queue is drained elsewhere by the UI so that log lines can be
    rendered inside the Streamlit app.
    """

    # Same layout as the original console-style output.
    _FORMAT = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'

    def __init__(self, log_queue):
        """Remember the destination queue and install the formatter."""
        super().__init__()
        self.log_queue = log_queue
        self.setFormatter(logging.Formatter(self._FORMAT))

    def emit(self, record):
        """Format *record* and enqueue the resulting line."""
        self.log_queue.put(self.format(record))
|
||||
|
||||
# Configure root logger: remove any pre-existing handlers so every record
# goes exactly once to (a) log_queue, for the "System Logs" tab, and
# (b) the console via a StreamHandler.
root_logger = logging.getLogger()
root_logger.setLevel(logging.INFO)
# Iterate over a copy ([:]) because removeHandler mutates the handler list.
for handler in root_logger.handlers[:]:
    root_logger.removeHandler(handler)
root_logger.addHandler(StreamlitLogHandler(log_queue))
root_logger.addHandler(logging.StreamHandler())  # Also log to console
|
||||
|
||||
# Configure Streamlit page (must run before any other st.* rendering call).
st.set_page_config(
    page_title="Interview Prep Assistant(With OWL 🦉)",
    page_icon="🦉",
    layout="wide"
)

# Custom CSS injected once at startup: chat-style message bubbles
# (user / assistant / tool-call), the metric cards used by
# display_metrics(), and the pulsing "running" banner.
st.markdown("""
<style>
.main-title {
    font-size: 2.5rem;
    color: #4a89dc;
    text-align: center;
    margin-bottom: 1rem;
}
.sub-title {
    font-size: 1.2rem;
    color: #666;
    text-align: center;
    margin-bottom: 2rem;
}
.conversation-container {
    border: 1px solid #e0e0e0;
    border-radius: 8px;
    margin: 10px 0;
    padding: 10px;
    max-height: 500px;
    overflow-y: auto;
}
.user-message {
    background-color: #f0f7ff;
    border-left: 4px solid #4a89dc;
    padding: 10px;
    margin: 8px 0;
    border-radius: 4px;
}
.assistant-message {
    background-color: #f1f8e9;
    border-left: 4px solid #7cb342;
    padding: 10px;
    margin: 8px 0;
    border-radius: 4px;
}
.tool-call {
    background-color: #fff8e1;
    border: 1px solid #ffe0b2;
    padding: 8px;
    margin: 5px 0;
    border-radius: 4px;
    font-family: monospace;
    font-size: 0.9em;
}
.round-header {
    background-color: #e8eaf6;
    padding: 5px 10px;
    font-weight: bold;
    border-radius: 4px;
    margin: 15px 0 5px 0;
}
.final-answer {
    background-color: #e8f5e9;
    border-left: 5px solid #43a047;
    padding: 15px;
    margin: 15px 0;
    border-radius: 4px;
}
.metrics-container {
    display: flex;
    justify-content: space-around;
    margin: 15px 0;
    padding: 10px;
    background-color: #f5f5f5;
    border-radius: 4px;
}
.metric-box {
    text-align: center;
    padding: 8px 15px;
    background-color: white;
    border-radius: 8px;
    box-shadow: 0 1px 3px rgba(0,0,0,0.12);
}
.metric-value {
    font-size: 1.4rem;
    font-weight: bold;
    color: #4a89dc;
}
.metric-label {
    font-size: 0.8rem;
    color: #666;
}
.running-indicator {
    display: flex;
    align-items: center;
    justify-content: center;
    gap: 10px;
    margin: 15px 0;
    padding: 10px;
    background-color: #e3f2fd;
    border-radius: 4px;
    animation: pulse 2s infinite;
}
@keyframes pulse {
    0% { opacity: 1; }
    50% { opacity: 0.7; }
    100% { opacity: 1; }
}
</style>
""", unsafe_allow_html=True)
|
||||
|
||||
def display_conversation(chat_history):
    """Render the conversation rounds as styled HTML blocks.

    Each entry of *chat_history* is a dict that may carry "user",
    "assistant" and "tool_calls" keys; absent or empty parts of a
    round are simply skipped.
    """
    if not chat_history:
        st.info("No conversation available")
        return

    st.markdown("<div class='conversation-container'>", unsafe_allow_html=True)

    for round_num, message in enumerate(chat_history, start=1):
        st.markdown(f"<div class='round-header'>Round {round_num}</div>", unsafe_allow_html=True)

        # Job seeker's side of the round, if present and non-empty.
        user_text = message.get("user")
        if user_text:
            st.markdown(f"<div class='user-message'><b>🧑💼 Job Seeker:</b><br>{user_text}</div>", unsafe_allow_html=True)

        # Assistant reply; strip the internal truncation notice so the
        # displayed answer stays clean.
        assistant_content = message.get("assistant")
        if assistant_content:
            if "[Note: This conversation was limited" in assistant_content:
                assistant_content = assistant_content.replace("[Note: This conversation was limited to maintain response quality. The complete thought process is available in the logs.]", "")

            st.markdown(f"<div class='assistant-message'><b>🦉 Interview Coach:</b><br>{assistant_content}</div>", unsafe_allow_html=True)

        # Any tool invocations made during this round.
        for tool in message.get("tool_calls") or []:
            tool_name = tool.get('name', 'Unknown Tool')
            st.markdown(f"<div class='tool-call'><b>🔧 Tool Used: {tool_name}</b></div>", unsafe_allow_html=True)

    st.markdown("</div>", unsafe_allow_html=True)
|
||||
|
||||
def display_metrics(duration, token_count, num_rounds):
    """Show execution time, token usage and round count as metric cards.

    *token_count* is a dict with optional 'completion_token_count' and
    'prompt_token_count' entries; missing entries count as zero.
    """
    st.markdown("<div class='metrics-container'>", unsafe_allow_html=True)

    # Total tokens = completion + prompt.
    total_tokens = (token_count.get('completion_token_count', 0)
                    + token_count.get('prompt_token_count', 0))

    # One card per (value, label) pair, rendered with the shared template.
    cards = [
        (f"{duration:.1f}s", "Execution Time"),
        (f"{total_tokens:,}", "Total Tokens"),
        (f"{num_rounds}", "Conversation Rounds"),
    ]
    for value, label in cards:
        st.markdown(f"""
    <div class='metric-box'>
        <div class='metric-value'>{value}</div>
        <div class='metric-label'>{label}</div>
    </div>
    """, unsafe_allow_html=True)

    st.markdown("</div>", unsafe_allow_html=True)
|
||||
|
||||
def get_logs():
    """Drain and return all pending log lines from the shared log_queue."""
    drained = []
    # get_nowait() raises queue.Empty once the queue is exhausted; relying
    # on the exception also covers the race where another thread drains the
    # queue between iterations.
    while True:
        try:
            drained.append(log_queue.get_nowait())
        except queue.Empty:
            break
    return drained
|
||||
|
||||
def main():
|
||||
# Header
|
||||
st.markdown("<h1 class='main-title'>🦉 Interview Preparation Assistant</h1>", unsafe_allow_html=True)
|
||||
st.markdown("<p class='sub-title'>Powered by multi-agent AI collaboration</p>", unsafe_allow_html=True)
|
||||
|
||||
# Input section
|
||||
with st.container():
|
||||
col1, col2 = st.columns(2)
|
||||
with col1:
|
||||
job_role = st.text_input("Job Role", "Machine Learning Engineer")
|
||||
with col2:
|
||||
company_name = st.text_input("Company Name", "Google")
|
||||
|
||||
# Main functionality tabs
|
||||
tab1, tab2, tab3, tab4 = st.tabs(["Company Research", "Interview Questions", "Preparation Plan", "System Logs"])
|
||||
|
||||
# Tab 1: Company Research
|
||||
with tab1:
|
||||
st.header("🔍 Company Research")
|
||||
st.write("Get detailed insights about the company to help with your interview preparation.")
|
||||
|
||||
if st.button("Research Company", use_container_width=True):
|
||||
with st.spinner():
|
||||
# Display running indicator
|
||||
status = st.empty()
|
||||
status.markdown("<div class='running-indicator'>🔄 Researching company information...</div>", unsafe_allow_html=True)
|
||||
|
||||
# Progress bar
|
||||
progress = st.progress(0)
|
||||
|
||||
# Progress callback
|
||||
def update_progress(current_round, max_rounds):
|
||||
progress_value = min(current_round / max_rounds, 0.95)
|
||||
progress.progress(progress_value)
|
||||
status.markdown(f"<div class='running-indicator'>🔄 Processing conversation round {current_round}/{max_rounds}...</div>", unsafe_allow_html=True)
|
||||
|
||||
# Execute research
|
||||
try:
|
||||
start_time = time.time()
|
||||
result = research_company(
|
||||
company_name=company_name,
|
||||
detailed=True, # Always use detailed mode
|
||||
limited_searches=False, # Don't limit searches
|
||||
progress_callback=update_progress
|
||||
)
|
||||
duration = time.time() - start_time
|
||||
|
||||
# Update progress to complete
|
||||
progress.progress(1.0)
|
||||
status.markdown("<div class='running-indicator' style='background-color: #e8f5e9;'>✅ Research completed!</div>", unsafe_allow_html=True)
|
||||
|
||||
# Display metrics
|
||||
display_metrics(
|
||||
duration=duration,
|
||||
token_count=result["token_count"],
|
||||
num_rounds=len(result["chat_history"])
|
||||
)
|
||||
|
||||
# Display final answer
|
||||
st.subheader("📝 Research Results")
|
||||
st.markdown(f"<div class='final-answer'>{result['answer']}</div>", unsafe_allow_html=True)
|
||||
|
||||
# Display conversation
|
||||
st.subheader("💬 Conversation Process")
|
||||
display_conversation(result["chat_history"])
|
||||
|
||||
except Exception as e:
|
||||
st.error(f"Error: {str(e)}")
|
||||
logging.exception("Error in company research")
|
||||
|
||||
# Tab 2: Interview Questions
|
||||
with tab2:
|
||||
st.header("❓ Interview Questions")
|
||||
st.write("Generate tailored interview questions for your target role and company.")
|
||||
|
||||
# Question type selector (adds interactivity but doesn't change behavior for now)
|
||||
question_type = st.radio(
|
||||
"Question Type",
|
||||
["Technical", "Behavioral", "Company-Specific", "All"],
|
||||
horizontal=True
|
||||
)
|
||||
|
||||
if st.button("Generate Questions", use_container_width=True):
|
||||
with st.spinner():
|
||||
# Display running indicator
|
||||
status = st.empty()
|
||||
status.markdown("<div class='running-indicator'>🔄 Creating interview questions...</div>", unsafe_allow_html=True)
|
||||
|
||||
# Progress bar
|
||||
progress = st.progress(0)
|
||||
|
||||
# Progress callback
|
||||
def update_progress(current_round, max_rounds):
|
||||
progress_value = min(current_round / max_rounds, 0.95)
|
||||
progress.progress(progress_value)
|
||||
status.markdown(f"<div class='running-indicator'>🔄 Processing conversation round {current_round}/{max_rounds}...</div>", unsafe_allow_html=True)
|
||||
|
||||
# Execute question generation
|
||||
try:
|
||||
start_time = time.time()
|
||||
result = generate_interview_questions(
|
||||
job_role=job_role,
|
||||
company_name=company_name,
|
||||
detailed=True, # Always use detailed mode
|
||||
limited_searches=False, # Don't limit searches
|
||||
progress_callback=update_progress
|
||||
)
|
||||
duration = time.time() - start_time
|
||||
|
||||
# Update progress to complete
|
||||
progress.progress(1.0)
|
||||
status.markdown("<div class='running-indicator' style='background-color: #e8f5e9;'>✅ Questions generated!</div>", unsafe_allow_html=True)
|
||||
|
||||
# Display metrics
|
||||
display_metrics(
|
||||
duration=duration,
|
||||
token_count=result["token_count"],
|
||||
num_rounds=len(result["chat_history"])
|
||||
)
|
||||
|
||||
# Display final answer
|
||||
st.subheader("📝 Generated Questions")
|
||||
st.markdown(f"<div class='final-answer'>{result['answer']}</div>", unsafe_allow_html=True)
|
||||
|
||||
# Display conversation
|
||||
st.subheader("💬 Conversation Process")
|
||||
display_conversation(result["chat_history"])
|
||||
|
||||
except Exception as e:
|
||||
st.error(f"Error: {str(e)}")
|
||||
logging.exception("Error in question generation")
|
||||
|
||||
# Tab 3: Preparation Plan
|
||||
with tab3:
|
||||
st.header("📋 Interview Preparation Plan")
|
||||
st.write("Create a comprehensive step-by-step plan to prepare for your interview.")
|
||||
|
||||
if st.button("Create Preparation Plan", use_container_width=True):
|
||||
with st.spinner():
|
||||
# Display running indicator
|
||||
status = st.empty()
|
||||
status.markdown("<div class='running-indicator'>🔄 Creating preparation plan...</div>", unsafe_allow_html=True)
|
||||
|
||||
# Progress bar
|
||||
progress = st.progress(0)
|
||||
|
||||
# Progress callback
|
||||
def update_progress(current_round, max_rounds):
|
||||
progress_value = min(current_round / max_rounds, 0.95)
|
||||
progress.progress(progress_value)
|
||||
status.markdown(f"<div class='running-indicator'>🔄 Processing conversation round {current_round}/{max_rounds}...</div>", unsafe_allow_html=True)
|
||||
|
||||
# Execute plan creation
|
||||
try:
|
||||
start_time = time.time()
|
||||
result = create_interview_prep_plan(
|
||||
job_role=job_role,
|
||||
company_name=company_name,
|
||||
detailed=True, # Always use detailed mode
|
||||
limited_searches=False, # Don't limit searches
|
||||
progress_callback=update_progress
|
||||
)
|
||||
duration = time.time() - start_time
|
||||
|
||||
# Update progress to complete
|
||||
progress.progress(1.0)
|
||||
status.markdown("<div class='running-indicator' style='background-color: #e8f5e9;'>✅ Plan created!</div>", unsafe_allow_html=True)
|
||||
|
||||
# Display metrics
|
||||
display_metrics(
|
||||
duration=duration,
|
||||
token_count=result["token_count"],
|
||||
num_rounds=len(result["chat_history"])
|
||||
)
|
||||
|
||||
# Display final answer
|
||||
st.subheader("📝 Preparation Plan")
|
||||
st.markdown(f"<div class='final-answer'>{result['answer']}</div>", unsafe_allow_html=True)
|
||||
|
||||
# Display conversation
|
||||
st.subheader("💬 Conversation Process")
|
||||
display_conversation(result["chat_history"])
|
||||
|
||||
except Exception as e:
|
||||
st.error(f"Error: {str(e)}")
|
||||
logging.exception("Error in preparation plan creation")
|
||||
|
||||
# Tab 4: System Logs
|
||||
with tab4:
|
||||
st.header("🔧 System Logs")
|
||||
st.write("View detailed system logs for debugging.")
|
||||
|
||||
logs_container = st.empty()
|
||||
|
||||
# Get and display logs
|
||||
logs = get_logs()
|
||||
if logs:
|
||||
logs_container.code("\n".join(logs))
|
||||
else:
|
||||
logs_container.info("No logs available yet.")
|
||||
|
||||
# Manual refresh button
|
||||
if st.button("Refresh Logs"):
|
||||
logs = get_logs()
|
||||
if logs:
|
||||
logs_container.code("\n".join(logs))
|
||||
else:
|
||||
logs_container.info("No logs available yet.")
|
||||
|
||||
# Auto-refresh toggle
|
||||
auto_refresh = st.checkbox("Auto-refresh logs", value=True)
|
||||
if auto_refresh:
|
||||
st.markdown(
|
||||
"""
|
||||
<script>
|
||||
function refreshLogs() {
|
||||
const checkbox = document.querySelector('.stCheckbox input[type="checkbox"]');
|
||||
if (checkbox && checkbox.checked) {
|
||||
const refreshButton = document.querySelector('button:contains("Refresh Logs")');
|
||||
if (refreshButton) refreshButton.click();
|
||||
}
|
||||
setTimeout(refreshLogs, 3000);
|
||||
}
|
||||
setTimeout(refreshLogs, 3000);
|
||||
</script>
|
||||
""",
|
||||
unsafe_allow_html=True
|
||||
)
|
||||
|
||||
if __name__ == "__main__":
    # Entry point for the Streamlit app: run main() and surface any
    # unhandled exception both in the UI (st.error) and in the log
    # (with full traceback via logging.exception).
    try:
        logging.info("Starting Interview Preparation Assistant application")
        main()
    except Exception as e:
        st.error(f"Application error: {str(e)}")
        logging.exception("Application error")
|
||||
@ -0,0 +1,59 @@
|
||||
def get_system_prompt() -> str:
    """Return the base system prompt shared by every assistant task.

    The prompt instructs the OWL agent to produce long-form, structured,
    non-truncated output, with code examples for technical roles.
    """
    prompt = """
You are an advanced Interview Preparation Assistant powered by OWL multi-agent technology.
Your primary task is to provide COMPREHENSIVE, EXTREMELY DETAILED, and HIGHLY SPECIFIC
interview preparation materials with practical examples and actionable advice.

IMPORTANT OUTPUT REQUIREMENTS:

1. EXTREME DETAIL: Do not summarize or truncate your responses. Provide complete, comprehensive
information with multiple sections, subsections, and extensive details. The final output
should be at least 2000 words, ideally 3000-4000 for truly thorough coverage.

2. PRACTICAL CODE EXAMPLES: For technical roles, include relevant code snippets, detailed
technical scenarios, and at least 5-10 code samples or system design outlines.

3. COMPREHENSIVE CONTENT: Create exceptionally thorough content with step-by-step instructions,
deep explanations, and multiple examples. Never abbreviate or summarize your responses.

4. NO TRUNCATION: Never cut off your responses with '...' or similar. Always provide the
complete thought or explanation.

5. STRUCTURED OUTPUT: Use clear headings (H1, H2, H3, etc.), bullet points, numbered lists,
and well-organized sections to present the content in a digestible way.

6. SPECIFIC IMPLEMENTATIONS: For technical roles, always provide multiple code examples,
approaches, edge cases, and relevant optimizations.

7. FILE MANAGEMENT: You may save all information as well-formatted files, but also include
the entire unabridged content directly in your response.
"""
    return prompt
|
||||
|
||||
def get_company_research_prompt(company_name: str) -> str:
    """Build the company-research task prompt for *company_name*.

    The prompt demands a long-form (3000+ word) report covering history,
    mission, tech stack, culture, and interview process.
    """
    prompt = f"""
Conduct the most COMPREHENSIVE and EXTREMELY DETAILED research on {company_name} possible.
The final output must be at least 3000 words, covering the company's history, mission,
technology stack, culture, interview process, and more. Provide code or architecture
examples if relevant, and do not abbreviate or summarize.
"""
    return prompt
|
||||
|
||||
def get_question_generator_prompt(job_role: str, company_name: str) -> str:
    """Build the interview-question generation prompt.

    Asks for 30+ questions with sample answers and code examples,
    tailored to *job_role* at *company_name*.
    """
    prompt = f"""
Generate an EXTREMELY COMPREHENSIVE, EXHAUSTIVELY DETAILED set of interview questions for
a {job_role} position at {company_name}. Provide at least 30 questions with deep sample
answers, code examples, multiple solution approaches, and a total of 3000+ words.
Do not truncate or summarize.
"""
    return prompt
|
||||
|
||||
def get_preparation_plan_prompt(job_role: str, company_name: str) -> str:
    """Build the multi-day interview preparation-plan prompt.

    Asks for a 2000+ word day-by-day plan tailored to *job_role* at
    *company_name*.
    """
    prompt = f"""
Create a HIGHLY THOROUGH, MULTI-DAY interview preparation plan for a {job_role} position
at {company_name}. The final plan should exceed 2000 words, with detailed daily tasks,
technical reviews, code examples (if relevant), and no summary or truncation.
Cover everything from fundamental skills to advanced interview strategies.
"""
    return prompt
|
||||
|
||||
@ -0,0 +1,275 @@
|
||||
import os
|
||||
import logging
|
||||
import time
|
||||
import functools
|
||||
import inspect
|
||||
import re
|
||||
from typing import Dict, Any, List, Tuple, Callable, Optional
|
||||
import queue
|
||||
|
||||
# Create a singleton log queue that can be shared between modules
|
||||
class LogQueueSingleton:
    """Process-wide shared log queue.

    Lazily creates a single ``queue.Queue`` on first access so every
    module that asks for it gets the same instance.
    """

    _instance = None  # the shared queue, created on first get_instance()

    @classmethod
    def get_instance(cls):
        """Return the shared queue, creating it on first use."""
        shared = cls._instance
        if shared is None:
            shared = queue.Queue()
            cls._instance = shared
        return shared
|
||||
|
||||
# Custom logging wrapper for tools
|
||||
def log_tool_usage(func):
    """
    Decorator that logs usage of an async tool function.

    Logs the tool name on entry, sanitized argument counts, completion,
    and any error (which is re-raised unchanged). The wrapped coroutine's
    return value is passed through untouched.
    """
    @functools.wraps(func)
    async def wrapper(*args, **kwargs):
        tool_name = func.__name__
        logging.info(f"🔧 TOOL TRIGGERED: {tool_name}")
        try:
            # Sanitize arguments to avoid logging sensitive info
            safe_args = sanitize_args(args)
            safe_kwargs = {k: sanitize_value(v) for k, v in kwargs.items()}
            # Fix: previously safe_args was computed but never logged —
            # report both positional and keyword argument counts.
            logging.info(
                f"🔍 TOOL ARGS: {tool_name} called with "
                f"{len(safe_args)} positional and {len(safe_kwargs)} keyword parameters"
            )

            result = await func(*args, **kwargs)

            # Log completion but not the actual result content
            # (might be large or sensitive)
            logging.info(f"✅ TOOL COMPLETED: {tool_name}")
            return result
        except Exception as e:
            logging.error(f"❌ TOOL ERROR: {tool_name} - {str(e)}")
            raise
    return wrapper
|
||||
|
||||
# Non-async version for synchronous functions
|
||||
def log_tool_usage_sync(func):
    """
    Decorator that logs usage of a synchronous tool function.

    Mirrors ``log_tool_usage`` for non-async callables: logs entry,
    sanitized argument counts, completion, and errors (re-raised).
    """
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        tool_name = func.__name__
        logging.info(f"🔧 TOOL TRIGGERED: {tool_name}")
        try:
            # Sanitize arguments to avoid logging sensitive info
            safe_args = sanitize_args(args)
            safe_kwargs = {k: sanitize_value(v) for k, v in kwargs.items()}
            # Fix: previously safe_args was computed but never logged —
            # report both positional and keyword argument counts.
            logging.info(
                f"🔍 TOOL ARGS: {tool_name} called with "
                f"{len(safe_args)} positional and {len(safe_kwargs)} keyword parameters"
            )

            result = func(*args, **kwargs)

            # Log completion but not the actual result content
            # (might be large or sensitive)
            logging.info(f"✅ TOOL COMPLETED: {tool_name}")
            return result
        except Exception as e:
            logging.error(f"❌ TOOL ERROR: {tool_name} - {str(e)}")
            raise
    return wrapper
|
||||
|
||||
def sanitize_args(args):
    """Return a list with every positional argument sanitized for logging."""
    return [sanitize_value(item) for item in args]
|
||||
|
||||
def sanitize_value(value):
    """Render one value as a short, log-safe description.

    Long strings are truncated to 47 chars plus an ellipsis; containers
    are summarized by type and length; anything else by its type name.
    """
    if isinstance(value, str):
        return value if len(value) <= 50 else value[:47] + "..."
    if isinstance(value, dict):
        return f"dict with {len(value)} items"
    if isinstance(value, (list, tuple)):
        return f"{type(value).__name__} with {len(value)} items"
    return f"{type(value).__name__}"
|
||||
|
||||
class LoggingToolkitWrapper:
    """
    Wrapper class to add logging to toolkit methods.

    Proxies attribute access to the wrapped toolkit; public callables are
    returned wrapped so every invocation logs start, argument counts,
    completion, and errors. Non-callables and private names pass through
    unchanged.
    """
    def __init__(self, toolkit):
        # Keep a reference to the real toolkit and remember its class name
        # for log prefixes.
        self.toolkit = toolkit
        self.toolkit_name = toolkit.__class__.__name__
        logging.info(f"📦 TOOLKIT INITIALIZED: {self.toolkit_name}")

    def __getattr__(self, name):
        # __getattr__ is only called for names not found on the wrapper
        # itself, so toolkit attributes are resolved here.
        attr = getattr(self.toolkit, name)

        if callable(attr) and not name.startswith('_'):
            if inspect.iscoroutinefunction(attr):
                # It's an async function, wrap it with our async decorator.
                # NOTE(review): log_tool_usage logs only func.__name__, so
                # async tools lose the toolkit-name prefix used below —
                # confirm whether that asymmetry is intended.
                return log_tool_usage(attr)
            else:
                # For non-async functions: wrap with an inline logger that
                # prefixes messages with "<ToolkitName>.<method>".
                @functools.wraps(attr)
                def wrapper(*args, **kwargs):
                    logging.info(f"🔧 TOOL TRIGGERED: {self.toolkit_name}.{name}")
                    try:
                        # Sanitize arguments to avoid logging sensitive info.
                        # NOTE(review): safe_args is computed but never
                        # logged; only the keyword count appears below.
                        safe_args = sanitize_args(args)
                        safe_kwargs = {k: sanitize_value(v) for k, v in kwargs.items()}
                        logging.info(f"🔍 TOOL ARGS: {name} called with {len(safe_kwargs)} parameters")

                        result = attr(*args, **kwargs)

                        # Log completion but not the result content.
                        logging.info(f"✅ TOOL COMPLETED: {self.toolkit_name}.{name}")
                        return result
                    except Exception as e:
                        logging.error(f"❌ TOOL ERROR: {self.toolkit_name}.{name} - {str(e)}")
                        raise
                return wrapper

        # Non-callable or private attribute: pass through unwrapped.
        return attr
|
||||
|
||||
def wrap_toolkits(toolkits_list):
    """
    Wrap a list of toolkits with logging functionality.

    Returns a new list of LoggingToolkitWrapper instances, one per toolkit.
    """
    return [LoggingToolkitWrapper(tk) for tk in toolkits_list]
|
||||
# Find this function in logging_utils.py and replace it with this corrected version
|
||||
|
||||
# Enhanced run_society function with logging
|
||||
def enhanced_run_society(society, verbose=True):
    """
    Enhanced wrapper around the OWL run_society function with detailed logging.

    Temporarily instruments the society (message-send hook, tool inventory
    logging), runs the original ``run_society``, logs duration and token
    usage, and always restores the patched method in ``finally``.

    NOTE(review): ``verbose`` is accepted but not read anywhere in this
    function — confirm whether callers rely on it before removing.
    """
    from owl.utils import run_society as original_run_society

    # Log the society setup; fall back to generic role names if absent.
    user_role = getattr(society, 'user_role_name', 'User')
    assistant_role = getattr(society, 'assistant_role_name', 'Assistant')

    logging.info(f"🚀 STARTING AGENT SOCIETY: {user_role} & {assistant_role}")
    logging.info(f"📝 TASK: {society.task_prompt[:100]}...")

    # Log agent initialization
    logging.info(f"🤖 INITIALIZING AGENT: {assistant_role}")

    # Add hooks to log message exchanges if possible. original_send_message
    # doubles as a flag: non-None means we patched and must restore later.
    original_send_message = None
    if hasattr(society, 'assistant_agent') and hasattr(society.assistant_agent, 'send_message'):
        original_send_message = society.assistant_agent.send_message

        @functools.wraps(original_send_message)
        def logged_send_message(*args, **kwargs):
            # Log before and after each agent message round-trip.
            logging.info(f"💬 AGENT MESSAGE: {assistant_role} is processing...")
            result = original_send_message(*args, **kwargs)
            logging.info(f"📨 AGENT RESPONSE RECEIVED from {assistant_role}")
            return result

        society.assistant_agent.send_message = logged_send_message

    # Try to log tool usage if possible (inventory only — tools are not
    # wrapped here, just enumerated).
    if hasattr(society, 'assistant_agent') and hasattr(society.assistant_agent, 'tools'):
        tools = getattr(society.assistant_agent, 'tools', [])
        logging.info(f"🧰 AGENT HAS {len(tools)} TOOLS AVAILABLE")

        # Attempt to list each tool by name, falling back to an index.
        for i, tool in enumerate(tools):
            if callable(tool):
                tool_name = getattr(tool, '__name__', f"tool_{i}")
                logging.info(f"🔧 TOOL AVAILABLE: {tool_name}")

    # Run the original function and time it.
    start_time = time.time()
    try:
        logging.info(f"⏳ RUNNING SOCIETY...")
        # Note: original_run_society is called without a verbose parameter —
        # the underlying OWL API does not accept one.
        answer, chat_history, token_count = original_run_society(society)
        end_time = time.time()
        duration = end_time - start_time

        # Log prompt and completion tokens separately if available;
        # token_count may be a plain number or a dict depending on OWL.
        if isinstance(token_count, dict):
            prompt_tokens = token_count.get('prompt_token_count', 0)
            completion_tokens = token_count.get('completion_token_count', 0)
            logging.info(f"💰 TOKEN USAGE: Prompt={prompt_tokens}, Completion={completion_tokens}, Total={prompt_tokens + completion_tokens}")
        else:
            logging.info(f"💰 TOKEN USAGE: {token_count}")

        logging.info(f"✅ AGENT SOCIETY COMPLETED: Duration {duration:.2f}s")

        return answer, chat_history, token_count
    except Exception as e:
        logging.error(f"❌ AGENT SOCIETY ERROR: {str(e)}")
        raise
    finally:
        # Restore original method if we modified it, even on error.
        if original_send_message and hasattr(society, 'assistant_agent'):
            society.assistant_agent.send_message = original_send_message
|
||||
|
||||
|
||||
|
||||
# Function to sanitize logs to avoid exposing sensitive information
|
||||
def sanitize_log(log_message):
    """
    Sanitize log messages to avoid exposing sensitive information.

    Redacts, in order: IPv4 addresses, API-key/token style assignments,
    and credentials embedded in URLs. Returns the sanitized string.
    """
    # Simple IPv4 address pattern matching
    ip_pattern = r'\b\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}\b'
    sanitized = re.sub(ip_pattern, '[REDACTED_IP]', log_message)

    # Redact API keys (common patterns). The value class includes '-' and
    # '_' so hyphenated real-world keys (e.g. "sk-..." style tokens) are
    # caught; the previous [a-zA-Z0-9]{20,} class missed them.
    api_key_pattern = r'(api[_-]?key|apikey|key|token)["\']?\s*[:=]\s*["\']?([A-Za-z0-9_\-]{20,})["\']?'
    sanitized = re.sub(api_key_pattern, r'\1: [REDACTED_API_KEY]', sanitized, flags=re.IGNORECASE)

    # Redact URLs with embedded authentication information
    url_auth_pattern = r'(https?://)([^:@/]+:[^@/]+@)([^\s/]+)'
    sanitized = re.sub(url_auth_pattern, r'\1[REDACTED_AUTH]@\3', sanitized)

    return sanitized
|
||||
|
||||
# Enhanced StreamlitLogHandler that sanitizes logs
|
||||
class EnhancedStreamlitLogHandler(logging.Handler):
    """Logging handler that pushes sanitized records onto a shared queue.

    The queue is later drained by the Streamlit UI to display logs;
    every record is passed through sanitize_log() before enqueueing so
    IPs, API keys, and URL credentials never reach the UI.
    """
    def __init__(self, log_queue):
        super().__init__()
        # Queue shared with the UI thread (see LogQueueSingleton).
        self.log_queue = log_queue
        self.setFormatter(logging.Formatter('%(asctime)s - %(levelname)s - %(message)s'))

    def emit(self, record):
        # Format per the handler's formatter, then scrub sensitive data.
        log_entry = self.format(record)
        # Sanitize the log to remove sensitive information
        sanitized_log = sanitize_log(log_entry)
        self.log_queue.put(sanitized_log)
|
||||
|
||||
# Add logging to specific OWL functions if possible
|
||||
# Add this updated function to logging_utils.py
|
||||
|
||||
# Add logging to specific OWL functions if possible
|
||||
def patch_owl_logging():
    """Try to patch specific OWL functions to add logging.

    Monkey-patches owl.utils.run_society (if present) with a wrapper that
    logs start, completion, and errors. Returns True if the patch was
    attempted successfully, False if OWL is missing or patching failed.
    Safe to call even when the owl package is not installed.
    """
    try:
        from owl import utils

        # If run_society exists in utils, patch it to log around the call.
        if hasattr(utils, 'run_society'):
            original_run = utils.run_society

            def logged_run_society(*args, **kwargs):
                logging.info("🦉 OWL run_society called")
                try:
                    result = original_run(*args, **kwargs)
                    logging.info("🦉 OWL run_society completed")
                    return result
                except Exception as e:
                    logging.error(f"🦉 OWL run_society error: {str(e)}")
                    raise

            # Replace the original function module-wide; every caller that
            # resolves utils.run_society afterwards gets the logged version.
            utils.run_society = logged_run_society
            logging.info("🦉 OWL run_society patched with logging")

        return True
    except ImportError:
        # OWL not installed — degrade gracefully rather than crash.
        logging.warning("⚠️ Could not patch OWL logging - module not found")
        return False
    except Exception as e:
        # Any other failure is logged but never propagated.
        logging.warning(f"⚠️ Error patching OWL logging: {str(e)}")
        return False
|
||||
335
community_usecase/OWL Interview Preparation Assistant/main.py
Normal file
335
community_usecase/OWL Interview Preparation Assistant/main.py
Normal file
@ -0,0 +1,335 @@
|
||||
#main.py
|
||||
import os
|
||||
import logging
|
||||
import time
|
||||
from typing import Dict, Any, Callable, Optional
|
||||
from pathlib import Path
|
||||
import sys
|
||||
|
||||
# Add parent directory to path for OWL imports
|
||||
sys.path.append('../')
|
||||
from dotenv import load_dotenv
|
||||
import numpy as np # Explicitly import numpy to avoid 'numpy' errors
|
||||
from camel.models import ModelFactory
|
||||
from camel.types import ModelPlatformType, ModelType
|
||||
from camel.toolkits import (
|
||||
SearchToolkit,
|
||||
BrowserToolkit,
|
||||
CodeExecutionToolkit
|
||||
)
|
||||
from camel.societies import RolePlaying
|
||||
from camel.configs import ChatGPTConfig
|
||||
from owl.utils import run_society # Official run_society with round_limit support
|
||||
|
||||
# Import prompt templates
|
||||
from config.prompts import (
|
||||
get_system_prompt,
|
||||
get_company_research_prompt,
|
||||
get_question_generator_prompt,
|
||||
get_preparation_plan_prompt
|
||||
)
|
||||
|
||||
# Set up logging
|
||||
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
# Load environment variables
|
||||
load_dotenv()
|
||||
|
||||
# Create the output directory for interview preparation materials
|
||||
INTERVIEW_PREP_DIR = "./interview_prep"
|
||||
os.makedirs(INTERVIEW_PREP_DIR, exist_ok=True)
|
||||
|
||||
def run_society_with_strict_limit(society, round_limit=5, progress_callback=None):
    """Wrapper around run_society to ensure round limit is strictly enforced.

    This implementation hijacks the society's step method to force
    termination after a specific number of rounds, in addition to passing
    round_limit to the official run_society. The original step method is
    always restored in ``finally``.

    Args:
        society: RolePlaying society to run.
        round_limit: Maximum conversation rounds before forced termination.
        progress_callback: Optional callable(current_round, round_limit)
            invoked once per round.

    Returns:
        Tuple of (answer, chat_history, token_count) as produced by
        run_society, with a truncation note appended to the last message.
    """
    # Track rounds manually across limited_step invocations.
    round_count = 0

    # Save original step function so it can be restored later.
    original_step = society.step

    # Override the step method
    def limited_step(*args, **kwargs):
        nonlocal round_count
        round_count += 1

        # Report progress if callback is provided
        if progress_callback and callable(progress_callback):
            progress_callback(round_count, round_limit)

        # Force termination after reaching the round limit
        if round_count >= round_limit:
            logger.info(f"Reached round limit of {round_limit}, forcibly terminating.")
            # Force a TASK_DONE in the user response to trigger termination.
            # NOTE(review): assumes step() returns a sequence whose second
            # element carries .msgs / .terminated — confirm against the
            # camel RolePlaying.step contract.
            result = original_step(*args, **kwargs)
            if len(result) >= 2 and hasattr(result[1], 'msgs') and result[1].msgs and len(result[1].msgs) > 0:
                result[1].msgs[0].content += "\n\nTASK_DONE"
                result[1].terminated = True
            return result

        return original_step(*args, **kwargs)

    # Replace the step method for the duration of the run.
    society.step = limited_step

    try:
        # Run the conversation with the standard run_society function;
        # round_limit is passed through as a second layer of protection.
        answer, chat_history, token_count = run_society(society, round_limit=round_limit)

        # Add a note about the conversation being truncated (once only,
        # guarded by the "truncated_note" marker key).
        if len(chat_history) > 0 and "truncated_note" not in chat_history[-1]:
            chat_history[-1]["truncated_note"] = True
            if "assistant" in chat_history[-1]:
                chat_history[-1]["assistant"] += "\n\n[Note: This conversation was limited to maintain response quality.]"

        return answer, chat_history, token_count

    finally:
        # Restore the original step method even if run_society raised.
        society.step = original_step
|
||||
|
||||
def construct_interview_assistant(
    job_description: str,
    company_name: str,
    detailed: bool = True,
    limited_searches: bool = True
) -> RolePlaying:
    """
    Construct a specialized interview preparation assistant using OWL.

    Picks a model backend (OpenRouter-Gemini if OPENROUTER_API_KEY is set,
    else OpenAI GPT-4o), assembles search/browser/code-execution tools,
    and returns a configured RolePlaying society.

    Args:
        job_description: Target role text, interpolated into the prompt.
        company_name: Target company, interpolated into the prompt.
        detailed: When True adds browser and code-execution toolkits on
            top of the essential search tools.
        limited_searches: NOTE(review) — accepted but never read in this
            function; confirm whether it should gate the Google search
            tool before removing.

    Raises:
        ValueError: If neither OPENAI_API_KEY nor OPENROUTER_API_KEY is set.
    """
    # Select model based on environment variables; OpenRouter wins when
    # both keys are present.
    if os.environ.get("OPENROUTER_API_KEY"):
        logger.info("Using OpenRouter with Gemini model")
        model = ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI_COMPATIBLE_MODEL,
            api_key=os.environ.get("OPENROUTER_API_KEY"),
            model_type="google/gemini-2.0-flash-001",
            url="https://openrouter.ai/api/v1",
            model_config_dict={
                "temperature": 0.6,
                "max_tokens": 4000,  # Reduced from 10000 to avoid exceeding limits
                # Do NOT use context_length - it's not a valid API parameter
            }
        )
    elif os.environ.get("OPENAI_API_KEY"):
        logger.info("Using OpenAI model (GPT-4)")
        config = ChatGPTConfig(
            temperature=0.3,
            max_tokens=4000
        )
        model = ModelFactory.create(
            model_platform=ModelPlatformType.OPENAI,
            model_type=ModelType.GPT_4O,
            model_config_dict=config.as_dict()
        )
    else:
        raise ValueError("Either OPENAI_API_KEY or OPENROUTER_API_KEY must be set")

    # Configure toolkits - Remove FileWriteToolkit as requested.
    # NOTE(review): each SearchToolkit() call creates a fresh instance;
    # presumably harmless, but a single shared instance may be intended.
    essential_tools = [
        SearchToolkit().search_duckduckgo,
        SearchToolkit().search_wiki,
        # Removed the FileWriteToolkit as requested
    ]

    # Google programmable search is optional and only added when both
    # credentials are configured.
    if os.environ.get("GOOGLE_API_KEY") and os.environ.get("SEARCH_ENGINE_ID"):
        essential_tools.append(SearchToolkit().search_google)

    if detailed:
        tools = [
            *essential_tools,
            *BrowserToolkit(
                headless=True,
                web_agent_model=model,
                planning_agent_model=model,
            ).get_tools(),
            *CodeExecutionToolkit(sandbox="subprocess", verbose=True).get_tools(),
        ]
        logger.info("Using full toolset for comprehensive results (detailed=True)")
    else:
        tools = essential_tools
        logger.info("Using essential toolset for faster results (detailed=False)")

    # Only the assistant agent gets tools; the user agent just converses.
    user_agent_kwargs = {"model": model}
    assistant_agent_kwargs = {"model": model, "tools": tools}

    # Build enhanced prompt asking for full, detailed output
    base_prompt = get_system_prompt()
    enhanced_prompt = f"""{base_prompt}
Task: Help me prepare for an interview at {company_name} for the position of {job_description}.
Requirements:
1. Provide a highly detailed, extremely comprehensive response (aim for at least 2000+ words).
2. Structure the output with clear sections, actionable insights, examples, and code where relevant.
3. Tailor the content specifically to {company_name} and the {job_description} role.
4. Do NOT truncate or summarize—provide the full explanation directly.
"""

    # with_task_specify=False keeps the prompt exactly as written above.
    task_kwargs = {
        "task_prompt": enhanced_prompt,
        "with_task_specify": False,
    }

    society = RolePlaying(
        **task_kwargs,
        user_role_name="job_seeker",
        user_agent_kwargs=user_agent_kwargs,
        assistant_role_name="interview_coach",
        assistant_agent_kwargs=assistant_agent_kwargs,
    )

    # Try to set memory parameters to reduce context size. These are
    # private attributes of the camel implementation and may not exist,
    # hence the best-effort guards.
    try:
        # Try to access the context creator if it exists
        if hasattr(society, '_context_creator') and hasattr(society._context_creator, 'max_tokens'):
            society._context_creator.max_tokens = 4000
        # Alternative approach through kwargs if available
        elif hasattr(society, '_context_creator_kwargs'):
            society._context_creator_kwargs = {"max_tokens": 4000}
    except AttributeError:
        logger.warning("Could not directly set memory parameters. Using default values.")

    return society
|
||||
|
||||
def research_company(
    company_name: str,
    detailed: bool = True,
    limited_searches: bool = True,
    progress_callback: Optional[Callable] = None
) -> Dict[str, Any]:
    """Research a company with the agent society and return the findings.

    Args:
        company_name: Company to research.
        detailed: Use the full toolset (browser, code execution) when True.
        limited_searches: Forwarded to construct_interview_assistant.
        progress_callback: Optional callable(current_round, max_rounds)
            invoked once per conversation round.

    Returns:
        Dict with keys "answer", "chat_history", "token_count",
        "generated_files", and "duration_seconds".
    """
    start_time = time.time()
    # Consistency fix: use the module-level logger (as the rest of this
    # module does) instead of the root logger.
    logger.info(f"Beginning company research for {company_name}")
    base_prompt = get_company_research_prompt(company_name)
    enhanced_prompt = f"""{base_prompt}

Please provide the most detailed, in-depth report possible, with no summarization or truncation.
Your response must include extensive coverage, code samples (if relevant), and be at least 2000 words long.
"""
    society = construct_interview_assistant("", company_name, detailed=detailed, limited_searches=limited_searches)
    society.task_prompt = enhanced_prompt

    # Use our strict wrapper function to enforce limit at exactly 5 rounds
    answer, chat_history, token_count = run_society_with_strict_limit(
        society,
        round_limit=5,
        progress_callback=progress_callback
    )

    duration = time.time() - start_time
    logger.info(f"Completed company research for {company_name} in {duration:.2f} seconds")

    # Find any files that may have been generated
    generated_files = [str(file) for file in Path(INTERVIEW_PREP_DIR).glob("*") if file.is_file()]

    return {
        "answer": answer,
        "chat_history": chat_history,
        "token_count": token_count,
        "generated_files": generated_files,
        "duration_seconds": duration
    }
|
||||
|
||||
def generate_interview_questions(
    job_role: str,
    company_name: str,
    detailed: bool = True,
    limited_searches: bool = True,
    progress_callback: Optional[Callable] = None
) -> Dict[str, Any]:
    """Generate tailored interview questions via the agent society.

    Args:
        job_role: Role the questions should target.
        company_name: Company the questions should target.
        detailed: Use the full toolset (browser, code execution) when True.
        limited_searches: Forwarded to construct_interview_assistant.
        progress_callback: Optional callable(current_round, max_rounds)
            invoked once per conversation round.

    Returns:
        Dict with keys "answer", "chat_history", "token_count",
        "generated_files", and "duration_seconds".

    Raises:
        Exception: Re-raises any failure after logging it with traceback.
    """
    start_time = time.time()
    # Consistency fix: use the module-level logger throughout.
    logger.info(f"Starting question generation for {job_role} at {company_name} (detailed={detailed})")

    try:
        # Removed the redundant local "import numpy as np": numpy is
        # already imported (and its availability checked) at module level.
        base_prompt = get_question_generator_prompt(job_role, company_name)
        enhanced_prompt = f"""{base_prompt}

Please provide at least 50 highly specific questions with code examples, multiple solution approaches,
and extremely thorough explanations. Aim for 3000+ words, with no truncation or summarization.
"""
        society = construct_interview_assistant(job_role, company_name, detailed=detailed, limited_searches=limited_searches)
        society.task_prompt = enhanced_prompt

        # Use our wrapper function to strictly enforce a limit of 5 rounds
        answer, chat_history, token_count = run_society_with_strict_limit(
            society,
            round_limit=5,
            progress_callback=progress_callback
        )

        duration = time.time() - start_time
        logger.info(f"Completed question generation for {job_role} at {company_name} in {duration:.2f} seconds")

        # Find any files that were generated
        generated_files = [str(file) for file in Path(INTERVIEW_PREP_DIR).glob("*") if file.is_file()]

        return {
            "answer": answer,
            "chat_history": chat_history,
            "token_count": token_count,
            "generated_files": generated_files,
            "duration_seconds": duration
        }

    except Exception as e:
        logger.error(f"Error in question generation: {str(e)}", exc_info=True)
        raise
|
||||
|
||||
def create_interview_prep_plan(
    job_role: str,
    company_name: str,
    detailed: bool = True,
    limited_searches: bool = True,
    progress_callback: Optional[Callable] = None
) -> Dict[str, Any]:
    """Create a multi-day interview preparation plan via the agent society.

    Args:
        job_role: Role the plan should target.
        company_name: Company the plan should target.
        detailed: Use the full toolset (browser, code execution) when True.
        limited_searches: Forwarded to construct_interview_assistant.
        progress_callback: Optional callable(current_round, max_rounds)
            invoked once per conversation round.

    Returns:
        Dict with keys "answer", "chat_history", "token_count",
        "generated_files", and "duration_seconds".

    Raises:
        Exception: Re-raises any failure after logging it with traceback.
    """
    start_time = time.time()
    # Consistency fix: use the module-level logger throughout.
    logger.info(f"Starting preparation plan creation for {job_role} at {company_name} (detailed={detailed})")

    try:
        base_prompt = get_preparation_plan_prompt(job_role, company_name)
        enhanced_prompt = f"""{base_prompt}

Please provide a highly thorough, step-by-step preparation plan with multiple days of tasks,
detailed technical reviews, code examples where applicable, and at least 2000 words total.
No truncation or summaries—include the full content.
"""
        society = construct_interview_assistant(job_role, company_name, detailed=detailed, limited_searches=limited_searches)
        society.task_prompt = enhanced_prompt

        # Use our wrapper function with strict limit of 5 rounds
        answer, chat_history, token_count = run_society_with_strict_limit(
            society,
            round_limit=5,
            progress_callback=progress_callback
        )

        duration = time.time() - start_time
        logger.info(f"Completed preparation plan creation in {duration:.2f} seconds")

        # Find any files that were generated
        generated_files = [str(file) for file in Path(INTERVIEW_PREP_DIR).glob("*") if file.is_file()]

        return {
            "answer": answer,
            "chat_history": chat_history,
            "token_count": token_count,
            "generated_files": generated_files,
            "duration_seconds": duration
        }

    except Exception as e:
        logger.error(f"Error in preparation plan creation: {str(e)}", exc_info=True)
        raise
|
||||
|
||||
if __name__ == "__main__":
    # Smoke-test entry point: build a preparation plan for a sample
    # role/company and print a short summary of the run.
    job_role = "Machine Learning Engineer"
    company_name = "Google"
    result = create_interview_prep_plan(job_role, company_name, detailed=True)
    print(f"Answer: {result['answer']}")
    print(f"Generated files: {result['generated_files']}")
    print(f"Execution time: {result['duration_seconds']:.2f} seconds")
    print(f"Conversation rounds: {len(result['chat_history'])}")
|
||||
@ -0,0 +1,25 @@
|
||||
# Core dependencies
|
||||
camel-ai[all]==0.2.35
|
||||
chunkr-ai>=0.0.41
|
||||
docx2markdown>=0.1.1
|
||||
streamlit>=1.24.0
|
||||
|
||||
# UI and visualization
|
||||
opencv-python>=4.7.0
|
||||
matplotlib>=3.7.1
|
||||
|
||||
# Data handling
|
||||
numpy>=1.24.3
|
||||
pandas>=2.0.2
|
||||
|
||||
# Utilities
|
||||
python-dotenv>=1.0.0
|
||||
requests>=2.31.0
|
||||
tqdm>=4.65.0
|
||||
|
||||
# Document processing
|
||||
PyPDF2>=3.0.0
|
||||
spacy>=3.5.3
|
||||
|
||||
# Install spaCy model
|
||||
# Run after pip install: python -m spacy download en_core_web_sm
|
||||
Loading…
x
Reference in New Issue
Block a user