Priyanka-Ankam committed (verified)
Commit 2557e47 · 1 Parent(s): b0f4e3e

Update app.py

Files changed (1)
  1. app.py +37 -34
app.py CHANGED
@@ -4,7 +4,6 @@ import pandas as pd
 import plotly.express as px
 from io import StringIO
 from huggingface_hub import InferenceClient
-from openai import OpenAI
 
 # --------------------------
 # Streamlit Setup
@@ -17,55 +16,58 @@ st.set_page_config(
 st.title("🧭 Team Career Progression Assistant")
 st.caption("Program Managers • Scrum Masters • People Leads")
 
-
 # --------------------------
-# Global OpenAI client
+# HuggingFace Model (Free)
 # --------------------------
-def get_client():
-    api_key = os.getenv("OPENAI_API_KEY")
-    if not api_key:
-        st.error("Missing OPENAI_API_KEY in Hugging Face Secrets.")
-        return None
-    return OpenAI(api_key=api_key)
+@st.cache_resource
+def load_hf_model():
+    return InferenceClient("google/flan-t5-large")  # strong + free
 
-client = get_client()
+hf_client = load_hf_model()
 
 
 # --------------------------
-# Auto-Generate Next Role (LLM)
+# Auto-Generate Next Role (HF)
 # --------------------------
 def generate_next_role(current_role: str) -> str:
-    if client is None:
-        return "Next Level Role"
-
-    prompt = f"""
-    What is the most likely next-level career role for someone currently working as a '{current_role}'?
-
-    Provide ONLY the job title without explanation.
-    """
+    prompt = (
+        f"Suggest the most likely next-level job title for someone working as a {current_role}. "
+        "Return only the job title, no explanation."
+    )
 
     try:
-        response = client.responses.create(
-            model="gpt-4.1-mini",
-            input=prompt,
-            max_output_tokens=20
-        )
-        return response.output_text.strip()
-
+        result = hf_client.text_generation(prompt, max_new_tokens=30)
+        return result.strip()
     except Exception as e:
-        st.error(f"Next Role LLM Error: {e}")
+        st.error(f"Next Role Generation Error: {e}")
         return "Next Level Role"
 
 
 # =======================
-# OpenAI LLM – 30-60-90 Plan
+# Generate 30-60-90 Plan (HF)
 # =======================
-def generate_llm_plan(role, next_role):
-    prompt = f"Create a 30-60-90 plan for a {role} moving to {next_role}."
+def generate_llm_plan(role: str, next_role: str) -> str:
+    prompt = f"""
+    Create a detailed professional 30-60-90 Day Plan for someone currently working as a {role},
+    who is preparing to grow into the next-level role: {next_role}.
+
+    The response must follow this structure:
+
+    ### 30-Day Goals
+    - ...
+
+    ### 60-Day Goals
+    - ...
+
+    ### 90-Day Goals
+    - ...
+    """
+
     try:
-        return hf_client.text_generation(prompt, max_new_tokens=180)
+        result = hf_client.text_generation(prompt, max_new_tokens=350)
+        return result.strip()
    except Exception as e:
-        st.error(f"HF Plan Error: {e}")
+        st.error(f"30-60-90 Plan Error: {e}")
        return "Could not generate plan."
 
 
@@ -129,7 +131,7 @@ with st.sidebar:
     file = st.file_uploader("Upload CSV", type=["csv"])
     use_sample = st.checkbox("Use sample data", value=True)
 
-    st.header("🤖 LLM Options")
+    st.header("🤖 AI Options")
     use_llm = st.checkbox("Enable AI-generated 30-60-90 plans", value=False)
 
 
@@ -155,8 +157,9 @@ df["ReadinessScore"] = df.apply(score, axis=1)
 df["SuggestedNextRole"] = df.apply(suggest_next_role, axis=1)
 df["RecommendedActions"] = df.apply(suggest_actions, axis=1)
 
-# LLM Mode
+# AI Mode
 if use_llm:
+    st.info("Generating 30-60-90 plans using FREE HuggingFace models...")
     df["LLMPlan"] = df.apply(
         lambda row: generate_llm_plan(
             row["CurrentRole"],
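For quick verification outside the Streamlit app, the new HF-backed helper can be exercised as a plain script. This is a minimal sketch, not part of the commit: it reuses the model ID, prompt, and token limit from the diff, and assumes the model is reachable through the serverless Inference API (an HF token may be needed depending on rate limits).

# Standalone sketch of the helper introduced in this commit (assumption: model served by the free Inference API).
from huggingface_hub import InferenceClient

hf_client = InferenceClient("google/flan-t5-large")  # same model as the diff

def generate_next_role(current_role: str) -> str:
    prompt = (
        f"Suggest the most likely next-level job title for someone working as a {current_role}. "
        "Return only the job title, no explanation."
    )
    try:
        # text_generation returns the generated string when streaming/details are off
        return hf_client.text_generation(prompt, max_new_tokens=30).strip()
    except Exception as exc:
        print(f"Next Role Generation Error: {exc}")
        return "Next Level Role"  # same fallback as the Streamlit app

if __name__ == "__main__":
    print(generate_next_role("Scrum Master"))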
 
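The "AI Mode" block in the last hunk is cut off after row["CurrentRole"],. The sketch below shows how that df.apply call plausibly completes on a toy DataFrame, assuming (hypothetically) that the second argument is the SuggestedNextRole column computed just above, and with a stub in place of the live model call.

# Sketch of the "AI Mode" step; the second lambda argument is an assumption, since the diff truncates before it.
import pandas as pd

def generate_llm_plan(role: str, next_role: str) -> str:
    # Stub standing in for the hf_client.text_generation call from the commit.
    return f"30-60-90 plan for a {role} growing into {next_role}"

df = pd.DataFrame({
    "CurrentRole": ["Scrum Master", "Program Manager"],
    "SuggestedNextRole": ["Agile Coach", "Senior Program Manager"],
})

use_llm = True  # mirrors the sidebar checkbox
if use_llm:
    df["LLMPlan"] = df.apply(
        lambda row: generate_llm_plan(row["CurrentRole"], row["SuggestedNextRole"]),
        axis=1,
    )

print(df[["CurrentRole", "LLMPlan"]])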