Priyanka-Ankam committed on
Commit e46b39d · verified · 1 Parent(s): 8c9fb41

Update app.py

Files changed (1)
  1. app.py +60 -58
app.py CHANGED
@@ -12,15 +12,19 @@ st.title("🧭 Team Career Progression Assistant")
 st.caption("Program Managers • Scrum Masters • People Leads")

 # ------------------------------------------------
-# Load Hugging Face Model (FLAN-T5 Small – light & free)
+# Load Hugging Face Model (OFFICIALLY SUPPORTED)
 # ------------------------------------------------
 @st.cache_resource
 def load_hf_model():
+    # This model works 100% with HF Inference API on Spaces
     return InferenceClient("google/gemma-1.1-2b-it")

 hf = load_hf_model()

-def safe_generate(prompt, tokens=200):
+# ------------------------------------------------
+# Safe text generation wrapper (Gemma-compatible)
+# ------------------------------------------------
+def safe_generate(prompt: str, tokens: int = 200):
     try:
         response = hf.text_generation(
             prompt,
@@ -29,19 +33,55 @@ def safe_generate(prompt, tokens=200):
             do_sample=True
         )

-        # Gemma returns {"generated_text": "..."}
+        # Gemma returns a dict: {"generated_text": "...output..."}
         if isinstance(response, dict) and "generated_text" in response:
             return response["generated_text"].strip()

-        return str(response).strip()
+        # rare fallback
+        if isinstance(response, str):
+            return response.strip()
+
+        return str(response)

     except Exception as e:
         st.warning(f"LLM error: {e}")
         return None

+# ------------------------------------------------
+# Next-Level Role (AI)
+# ------------------------------------------------
+def generate_next_role(current_role: str) -> str:
+    prompt = f"""
+    Suggest one realistic next-level job title for someone currently working as a {current_role}.
+    Return only the job title. Do not explain.
+    """
+    result = safe_generate(prompt, tokens=40)
+    return result if result else "Unable to generate"

 # ------------------------------------------------
-# Readiness Score & Recommendations
+# 30-60-90 Day Plan (AI)
+# ------------------------------------------------
+def generate_llm_plan(role: str, next_role: str) -> str:
+    prompt = f"""
+    Create a clear, structured 30-60-90 day development plan for someone currently
+    in the role {role} who wants to grow into the next-level role: {next_role}.
+
+    Use this format:
+
+    ### 30-Day Goals
+    - ...
+
+    ### 60-Day Goals
+    - ...
+
+    ### 90-Day Goals
+    - ...
+    """
+    result = safe_generate(prompt, tokens=350)
+    return result if result else "Unable to generate plan."
+
+# ------------------------------------------------
+# Scoring Logic
 # ------------------------------------------------
 def score(row):
     years = min(row["YearsExperience"], 10) / 10 * 5
@@ -50,10 +90,10 @@ def score(row):
     perf = row["PerformanceRating"]
     leadership = 5 if str(row["LeadershipInterest"]).lower() == "yes" else 2

-    score_5 = (0.3 * years + 0.2 * tech + 0.2 * soft + 0.2 * perf + 0.1 * leadership)
+    score_5 = (0.3*years + 0.2*tech + 0.2*soft + 0.2*perf + 0.1*leadership)
     return round(score_5 / 5 * 100, 1)

-def suggest_next_role(row):
+def suggest_next_role_rule(row):
     s = row["ReadinessScore"]
     if s >= 80:
         return "Team Lead / Scrum Master"
@@ -71,7 +111,7 @@ def suggest_actions(row):
     return "• " + "\n• ".join(acts)

 # ------------------------------------------------
-# Sidebar – Data Input
+# Sidebar File Input
 # ------------------------------------------------
 with st.sidebar:
     st.header("📂 Upload CSV or Use Sample")
@@ -92,15 +132,15 @@ st.subheader("📥 Input Data")
 st.dataframe(df_raw, use_container_width=True)

 # ------------------------------------------------
-# Compute Metrics
+# Compute Columns
 # ------------------------------------------------
 df = df_raw.copy()
 df["ReadinessScore"] = df.apply(score, axis=1)
-df["SuggestedNextRole"] = df.apply(suggest_next_role, axis=1)
+df["SuggestedNextRole"] = df.apply(suggest_next_role_rule, axis=1)
 df["RecommendedActions"] = df.apply(suggest_actions, axis=1)

 # ------------------------------------------------
-# Team Summary (Table)
+# Summary Table
 # ------------------------------------------------
 st.markdown("## 📊 Team Summary")
 st.dataframe(
@@ -109,25 +149,25 @@ st.dataframe(
 )

 # ------------------------------------------------
-# Dashboard (Charts)
+# Dashboard Charts
 # ------------------------------------------------
 st.markdown("---")
 st.markdown("## 📈 Team Dashboard")

-col1, col2 = st.columns(2)
+c1, c2 = st.columns(2)

-with col1:
+with c1:
     fig = px.bar(df, x="Name", y="ReadinessScore", text="ReadinessScore")
     st.plotly_chart(fig, use_container_width=True)

-with col2:
+with c2:
     role_counts = df["SuggestedNextRole"].value_counts().reset_index()
     role_counts.columns = ["Role", "Count"]
     fig2 = px.pie(role_counts, names="Role", values="Count")
     st.plotly_chart(fig2, use_container_width=True)

 # ------------------------------------------------
-# Employee-Level Recommendations (BOTTOM SECTION)
+# Employee-Level AI Section (BOTTOM)
 # ------------------------------------------------
 st.markdown("---")
 st.markdown("## 👤 Employee-Level Recommendations & AI Plan")
@@ -137,52 +177,14 @@ emp = df[df["Name"] == selected_name].iloc[0]

 st.markdown(f"### {emp['Name']} — {emp['CurrentRole']}")

-# Non-AI info
-st.markdown("#### Readiness & Suggested Path")
+st.markdown("#### Readiness & Suggested Path (Rule-Based)")
 st.write(f"**Readiness Score:** {emp['ReadinessScore']} / 100")
-st.write(f"**Suggested Next Role (rule-based):** {emp['SuggestedNextRole']}")
+st.write(f"**Suggested Next Role:** {emp['SuggestedNextRole']}")
 st.markdown("**Recommended Actions:**")
 st.write(emp["RecommendedActions"])

-# AI button
+# AI Button
 if st.button("🚀 Generate AI Next Role & 30-60-90 Plan"):
-    # ------------------------------------------------
-    # Next-Level Role (LLM)
-    # ------------------------------------------------
-    def generate_next_role(current_role: str) -> str:
-        prompt = (
-            f"Suggest one realistic next-level job title for someone currently "
-            f"working as a {current_role}. Return only the job title."
-        )
-        result = safe_generate(prompt, tokens=40)
-        if not result or "Error" in result:
-            return "Unable to generate"
-        return result
-
-    # ------------------------------------------------
-    # 30-60-90 Plan (LLM)
-    # ------------------------------------------------
-    def generate_llm_plan(role: str, next_role: str) -> str:
-        prompt = f"""
-        Create a clear, structured 30-60-90 day development plan for someone currently
-        working as a {role} who wants to grow into the next-level role: {next_role}.
-
-        Follow this format:
-
-        ### 30-Day Goals
-        - ...
-
-        ### 60-Day Goals
-        - ...
-
-        ### 90-Day Goals
-        - ...
-        """
-        result = safe_generate(prompt, tokens=350)
-        if not result or "Error" in result:
-            return "Unable to generate plan."
-        return result
-
     with st.spinner("Generating AI recommendations for this employee..."):
         next_role_ai = generate_next_role(emp["CurrentRole"])
         plan_ai = generate_llm_plan(emp["CurrentRole"], next_role_ai)
@@ -194,7 +196,7 @@ if st.button("🚀 Generate AI Next Role & 30-60-90 Plan"):
     st.write(plan_ai)

 # ------------------------------------------------
-# Download Results
+# Download CSV
 # ------------------------------------------------
 buf = StringIO()
 df.to_csv(buf, index=False)
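
For a quick local sanity check of the rule-based scoring path this commit touches (suggest_next_role is renamed to suggest_next_role_rule), the sketch below re-creates that logic outside Streamlit. The sample row, the TechnicalSkill/SoftSkill column names, and the thresholds below 80 are assumptions for illustration only and are not part of the commit; app.py remains the source of truth.

```python
# Minimal standalone sketch of the rule-based readiness scoring in app.py.
# Column names TechnicalSkill/SoftSkill and the < 80 thresholds are assumed,
# since those lines are not shown in the diff.
import pandas as pd

def score(row):
    years = min(row["YearsExperience"], 10) / 10 * 5
    tech = row["TechnicalSkill"]        # assumed 1-5 scale
    soft = row["SoftSkill"]             # assumed 1-5 scale
    perf = row["PerformanceRating"]     # assumed 1-5 scale
    leadership = 5 if str(row["LeadershipInterest"]).lower() == "yes" else 2
    score_5 = 0.3*years + 0.2*tech + 0.2*soft + 0.2*perf + 0.1*leadership
    return round(score_5 / 5 * 100, 1)

def suggest_next_role_rule(row):
    # Only the >= 80 branch appears in the diff; the rest is illustrative.
    s = row["ReadinessScore"]
    if s >= 80:
        return "Team Lead / Scrum Master"
    if s >= 60:
        return "Senior Individual Contributor"
    return "Grow in current role"

# One hypothetical employee row with the columns the scoring code reads.
df = pd.DataFrame([
    {"Name": "A. Example", "CurrentRole": "Scrum Master", "YearsExperience": 6,
     "TechnicalSkill": 4, "SoftSkill": 4, "PerformanceRating": 5,
     "LeadershipInterest": "Yes"},
])
df["ReadinessScore"] = df.apply(score, axis=1)
df["SuggestedNextRole"] = df.apply(suggest_next_role_rule, axis=1)
print(df[["Name", "ReadinessScore", "SuggestedNextRole"]])
```

With those sample values the weighted sum is 0.9 + 0.8 + 0.8 + 1.0 + 0.5 = 4.0 on the 5-point scale, i.e. a readiness score of 80.0, which lands in the >= 80 branch and maps to "Team Lead / Scrum Master".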