Installation

On your terminal, run the following command to install the ollama Python module. (The Ollama application itself must already be installed from ollama.com, since it provides the `ollama` CLI and the local model server.)

```bash
pip install ollama
```

Pull the model of your choice. For this tutorial we are running the Gemma2 2B model, since it is small and powerful.

```bash
ollama pull gemma2:2b
```

Install the LangChain Ollama integration module.

```bash
pip install langchain-ollama
```

Loading Model

Gemma2:2b is used to build the agent, as it is small in size and powerful too. Let's build an LLM agent that generates a resume for a given user.

```python
from langchain_ollama.llms import OllamaLLM
from langchain_core.prompts import ChatPromptTemplate
import json
from pprint import pprint

# Load the local Gemma2 2B model served by Ollama;
# temperature=0 keeps the output as deterministic as possible
llm = OllamaLLM(model="gemma2:2b", temperature=0)
```
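As a quick sanity check before wiring up any prompts, you can invoke the model directly (this assumes the Ollama server is running locally, which `ollama pull` usually ensures):

```python
# One-off call to confirm the model is reachable; invoke() returns a plain string
print(llm.invoke("Say hello in one short sentence."))
```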
```python
prompt = ChatPromptTemplate.from_messages(
    [
        (
            "system",
            """
            You are a resume generator.
            You will generate the resume for a given user name and the job title.
            You will print out the final resume that will be submitted to a job portal.
            Make four sections: Summary, Skills, Work Experience and Education.
            Summary value is a string.
            Skills value is a dictionary object with section and list of skills.
            Work experience value is a dictionary with work title and list of responsibilities.
            There should be 10 responsibilities for each work title.
            Education value is a dictionary with degree level and information like graduated year and university name, location.
            Provide all the output as a json object only.
            """,
        ),
        (
            "human",
            """
            user name: {user_name},
            job title: {job_title},
            bachelors: {bachelor_education},
            masters: {master_education},
            work experiences: {work_experiences}
            """,
        ),
    ]
)

chain = prompt | llm
```
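The `|` operator composes the prompt template and the model into a single runnable chain. If you want to see exactly what the model will receive, you can render the template yourself; the values below are hypothetical placeholders, not part of the tutorial data:

```python
# format_messages fills in the template variables and returns the message list
messages = prompt.format_messages(
    user_name="Jane Roe",  # hypothetical example values
    job_title="Data Scientist",
    bachelor_education="Example University, 2019, BSc in Statistics",
    master_education="Example University, 2021, MSc in Data Science",
    work_experiences="Acme Corp, Data Scientist",
)
for message in messages:
    print(message.type, ":", message.content[:80])
```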
```python
user_data = {
    "user_name": "John Doe",
    "job_title": "Machine Learning Engineer",
    "bachelor_education": "University of New Mexico, 2018, Bsc in Computer Science and Information System",
    "master_education": "Cumberlands University, 2023, Msc in Information Technology and System",
    "work_experiences": """Fidelity, Machine learning Engineer|
        Bank of America, BigData/Machine learning Engineer|
        Penny Mac, Data Engineer|
        Vitol, Data Engineer|
        Bank of America, Backend Engineer|
        UNM BBER, Fullstack Developer""",
}

# Stream the response token by token, printing as it arrives and
# accumulating the full text for parsing afterwards
result = ""
async for chunk in chain.astream(user_data):
    result = result + chunk
    print(chunk, end="", flush=True)
```
```json
{
"Summary": "Highly motivated and results-oriented Machine Learning Engineer with a proven track record of developing innovative solutions using advanced algorithms. Expertise in data analysis, machine learning model development, and implementation across diverse industries.",
"Skills": {
"Technical Skills": [
"Machine Learning",
"Deep Learning",
"Python",
"TensorFlow",
"Scikit-learn",
"Data Visualization",
"Cloud Computing (AWS)",
"Big Data Analytics",
"SQL",
"R"
],
"Soft Skills": [
"Communication",
"Problem Solving",
"Teamwork",
"Adaptability",
"Critical Thinking",
"Time Management"
]
},
"Work Experience": {
"Fidelity": [
"Developed and implemented machine learning models to predict customer churn, resulting in a 10% reduction in churn rate.",
"Designed and built data pipelines for real-time data analysis, improving operational efficiency by 20%.",
"Collaborated with cross-functional teams to develop and deploy AI-powered solutions for fraud detection.",
"Utilized Python libraries like Pandas and NumPy for data manipulation and statistical analysis.",
"Conducted A/B testing on machine learning models to optimize model performance and accuracy.",
"Presented findings and recommendations to stakeholders, effectively communicating complex technical concepts.",
"Mentored junior engineers on best practices in machine learning development.",
"Participated in code reviews and provided constructive feedback to improve team code quality."
],
"Bank of America": [
"Developed and implemented machine learning models for credit risk assessment, leading to a 5% improvement in loan approval rates.",
"Designed and built data pipelines for real-time fraud detection, reducing fraudulent transactions by 15%.",
"Collaborated with data scientists to develop predictive models for customer segmentation and targeting.",
"Utilized SQL and Python libraries like Pandas and NumPy for data analysis and model development.",
"Conducted A/B testing on machine learning models to optimize model performance and accuracy.",
"Presented findings and recommendations to stakeholders, effectively communicating complex technical concepts.",
"Mentored junior engineers on best practices in machine learning development.",
"Participated in code reviews and provided constructive feedback to improve team code quality."
],
"Penny Mac": [
"Designed and implemented data pipelines for real-time data analysis, improving operational efficiency by 20%.",
"Developed and deployed machine learning models for predictive maintenance, reducing equipment downtime by 10%.",
"Collaborated with engineers to develop and implement data visualization dashboards.",
"Utilized Python libraries like Pandas and NumPy for data manipulation and statistical analysis.",
"Conducted A/B testing on machine learning models to optimize model performance and accuracy.",
"Presented findings and recommendations to stakeholders, effectively communicating complex technical concepts.",
"Mentored junior engineers on best practices in data engineering.",
"Participated in code reviews and provided constructive feedback to improve team code quality."
],
"Vitol": [
"Developed and implemented machine learning models for demand forecasting, improving inventory management by 15%.",
"Designed and built data pipelines for real-time market analysis, enabling faster decision making.",
"Collaborated with engineers to develop and implement data visualization dashboards.",
"Utilized Python libraries like Pandas and NumPy for data manipulation and statistical analysis.",
"Conducted A/B testing on machine learning models to optimize model performance and accuracy.",
"Presented findings and recommendations to stakeholders, effectively communicating complex technical concepts.",
"Mentored junior engineers on best practices in machine learning development.",
"Participated in code reviews and provided constructive feedback to improve team code quality."
],
"Bank of America": [
"Developed and implemented backend systems for financial applications using Java and Spring Boot.",
"Designed and built RESTful APIs for data integration and communication.",
"Collaborated with developers to implement new features and functionalities.",
"Utilized SQL and NoSQL databases for data storage and retrieval.",
"Conducted code reviews and provided constructive feedback to improve team code quality.",
"Participated in technical design discussions and contributed to architectural decisions."
],
"UNM BBER": [
"Developed full-stack web applications using React, Node.js, and Express.js.",
"Designed and implemented user interfaces for various functionalities.",
"Collaborated with team members to develop and deploy new features.",
"Utilized version control systems like Git for code management.",
"Conducted unit testing and integration testing to ensure application functionality.",
"Participated in technical design discussions and contributed to architectural decisions."
]
},
"Education": {
"Bachelor's Degree": {
"University": "University of New Mexico",
"Degree": "Bsc in Computer Science and Information System",
"Graduation Year": 2018
},
"Master's Degree": {
"University": "Cumberlands University",
"Degree": "Msc in Information Technology and System",
"Graduation Year": 2023
}
}
}
```
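The `async for` loop above works as-is because Jupyter runs an event loop for you. In a plain Python script you would wrap the stream in a coroutine, roughly like this sketch (`chain` and `user_data` are the objects defined above):

```python
import asyncio

async def generate_resume() -> str:
    # Stream chunks from the chain and accumulate the full reply
    result = ""
    async for chunk in chain.astream(user_data):
        result += chunk
        print(chunk, end="", flush=True)
    return result

result = asyncio.run(generate_resume())
```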
```python
# Strip the markdown code fences so only the raw JSON string remains
result = result.replace("```json", "")
result = result.replace("```", "")

# Parse the string output as a JSON object
json_result = json.loads(result)

pprint(json_result['Education'])
pprint(json_result["Skills"])
```
{"Bachelor's Degree": {'Degree': 'Bsc in Computer Science and Information '
'System',
'Graduation Year': 2018,
'University': 'University of New Mexico'},
"Master's Degree": {'Degree': 'Msc in Information Technology and System',
'Graduation Year': 2023,
'University': 'Cumberlands University'}}
{'Soft Skills': ['Communication',
'Problem Solving',
'Teamwork',
'Adaptability',
'Critical Thinking',
'Time Management'],
'Technical Skills': ['Machine Learning',
'Deep Learning',
'Python',
'TensorFlow',
'Scikit-learn',
'Data Visualization',
'Cloud Computing (AWS)',
'Big Data Analytics',
'SQL',
'R']}
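Blindly stripping backticks works here, but small models do not always wrap their JSON the same way on every run. A slightly more defensive parser, sketched below (the `extract_json` helper is our own, not part of LangChain), first looks for a fenced block and then falls back to the widest brace-delimited span:

```python
import json
import re

def extract_json(text: str) -> dict:
    """Best-effort extraction of a JSON object from an LLM reply."""
    # Prefer the contents of a ```json fenced block when one is present
    match = re.search(r"```(?:json)?\s*(\{.*\})\s*```", text, re.DOTALL)
    payload = match.group(1) if match else text
    try:
        return json.loads(payload)
    except json.JSONDecodeError:
        # Fall back to the widest { ... } span in the raw text
        start, end = text.find("{"), text.rfind("}")
        return json.loads(text[start:end + 1])

json_result = extract_json(result)
```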
# TODO: add these tasks to the LLM agent
"""
Step 1: Scrape the job portal
Step 2: For each job listing's requirements and the user's resume, generate a resume in JSON format using the LLM
Step 3: Generate the cover letter using the LLM
Step 4: Convert the resume to docx or pdf format with a decent design
Step 5: Submit the job application, or send the resume and cover letter to the company through email
"""
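Step 4 is easy to prototype. Here is a minimal sketch using the python-docx package (`pip install python-docx`), assuming the JSON keys produced above; the `resume_to_docx` helper is ours, and a real version would want proper styling:

```python
from docx import Document  # pip install python-docx

def resume_to_docx(resume: dict, path: str = "resume.docx") -> None:
    """Render the generated JSON resume as a simple Word document."""
    doc = Document()
    doc.add_heading("Summary", level=1)
    doc.add_paragraph(resume["Summary"])

    doc.add_heading("Skills", level=1)
    for section, skills in resume["Skills"].items():
        doc.add_paragraph(f"{section}: " + ", ".join(skills))

    doc.add_heading("Work Experience", level=1)
    for title, duties in resume["Work Experience"].items():
        doc.add_heading(title, level=2)
        for duty in duties:
            doc.add_paragraph(duty, style="List Bullet")

    doc.add_heading("Education", level=1)
    for degree, info in resume["Education"].items():
        doc.add_paragraph(
            f"{degree}: {info['Degree']}, {info['University']}, {info['Graduation Year']}"
        )

    doc.save(path)

resume_to_docx(json_result)
```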