Spaces:
Sleeping
Sleeping
File size: 6,901 Bytes
9aafdc3 8a7c495 9aafdc3 0fe6bb8 adcf5c9 0fe6bb8 e376652 9aafdc3 0fe6bb8 e46b39d 0fe6bb8 2557e47 e46b39d 8c9fb41 169343d e376652 b927e3c e46b39d e03b2e4 8c9fb41 7d67e24 8c9fb41 7d67e24 e46b39d c2bced0 e46b39d 7d67e24 e03b2e4 adcf5c9 e376652 e46b39d 8c9fb41 0fe6bb8 e46b39d 0fe6bb8 9aafdc3 e46b39d 9aafdc3 e46b39d 0fe6bb8 9aafdc3 0fe6bb8 e376652 0fe6bb8 e376652 9aafdc3 0fe6bb8 e376652 9aafdc3 0fe6bb8 e46b39d 0fe6bb8 9aafdc3 e376652 9aafdc3 0fe6bb8 b927e3c 0fe6bb8 9aafdc3 169343d 9aafdc3 169343d 0fe6bb8 e46b39d 0fe6bb8 c9a1ffc e46b39d c9a1ffc 0fe6bb8 e46b39d 0fe6bb8 e376652 9aafdc3 e376652 9aafdc3 0fe6bb8 e46b39d 0fe6bb8 9aafdc3 e376652 9aafdc3 e46b39d adcf5c9 e46b39d 9aafdc3 adcf5c9 e46b39d 0fe6bb8 9aafdc3 0fe6bb8 e46b39d adcf5c9 e46b39d adcf5c9 e46b39d adcf5c9 e46b39d adcf5c9 e46b39d 0fe6bb8 e376652 9aafdc3 e376652 9aafdc3 e376652 b927e3c |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 |
import streamlit as st
import pandas as pd
import plotly.express as px
from io import StringIO
from huggingface_hub import InferenceClient
# ------------------------------------------------
# App Config
# ------------------------------------------------
# Wide layout suits the side-by-side dashboard charts further down the page.
st.set_page_config(page_title="Team Career Progression Assistant", layout="wide")
# NOTE(review): the emoji in these strings look mojibake-garbled — confirm the
# source file's encoding before editing them.
st.title("π§ Team Career Progression Assistant")
st.caption("Program Managers β’ Scrum Masters β’ People Leads")
# ------------------------------------------------
# Load Hugging Face Model (OFFICIALLY SUPPORTED)
# ------------------------------------------------
@st.cache_resource
def load_hf_model():
    """Build the Hugging Face inference client once per server process.

    st.cache_resource memoizes the client across Streamlit reruns so the
    connection object is not recreated on every user interaction.
    """
    # This model works 100% with HF Inference API on Spaces
    return InferenceClient("google/gemma-1.1-2b-it")

hf = load_hf_model()
# ------------------------------------------------
# Safe text generation wrapper (Gemma-compatible)
# ------------------------------------------------
def safe_generate(prompt: str, tokens: int = 200):
    """Run one text-generation call and normalize the reply to a string.

    Surfaces failures as a Streamlit warning and returns None instead of
    raising, so callers can substitute a placeholder message.
    """
    try:
        reply = hf.text_generation(
            prompt,
            max_new_tokens=tokens,
            temperature=0.7,
            do_sample=True,
        )
        # Depending on client/model version the reply may arrive as a
        # {"generated_text": ...} mapping or as a plain string; accept both.
        if isinstance(reply, dict) and "generated_text" in reply:
            return reply["generated_text"].strip()
        if isinstance(reply, str):
            return reply.strip()
        # Last resort: stringify whatever came back.
        return str(reply)
    except Exception as e:
        st.warning(f"LLM error: {e}")
        return None
# ------------------------------------------------
# Next-Level Role (AI)
# ------------------------------------------------
def generate_next_role(current_role: str) -> str:
    """Ask the model for one next-level job title (title only, no prose)."""
    prompt = f"""
Suggest one realistic next-level job title for someone currently working as a {current_role}.
Return only the job title. Do not explain.
"""
    # A short token budget is enough for a bare job title.
    suggestion = safe_generate(prompt, tokens=40)
    return suggestion if suggestion else "Unable to generate"
# ------------------------------------------------
# 30-60-90 Day Plan (AI)
# ------------------------------------------------
def generate_llm_plan(role: str, next_role: str) -> str:
    """Generate a markdown 30-60-90 day plan bridging *role* to *next_role*."""
    prompt = f"""
Create a clear, structured 30-60-90 day development plan for someone currently
in the role {role} who wants to grow into the next-level role: {next_role}.
Use this format:
### 30-Day Goals
- ...
### 60-Day Goals
- ...
### 90-Day Goals
- ...
"""
    plan = safe_generate(prompt, tokens=350)
    return plan if plan else "Unable to generate plan."
# ------------------------------------------------
# Scoring Logic
# ------------------------------------------------
def score(row) -> float:
    """Compute a 0-100 readiness score from one team member's record.

    Expects a mapping/Series with keys YearsExperience, TechSkillRating,
    SoftSkillRating, PerformanceRating (ratings presumably on a 0-5 scale —
    TODO confirm against the sample CSV) and LeadershipInterest ("yes"/"no").

    Returns the weighted score scaled to a percentage, rounded to 1 decimal.
    """
    # Experience contributes up to 5 points. Clamp to the 0-10 year band so
    # negative or out-of-range values in the CSV cannot skew the sum
    # (the original only capped the top end).
    years = min(max(row["YearsExperience"], 0), 10) / 10 * 5
    tech = row["TechSkillRating"]
    soft = row["SoftSkillRating"]
    perf = row["PerformanceRating"]
    # Stated interest in leadership earns 5 points, otherwise a baseline 2.
    leadership = 5 if str(row["LeadershipInterest"]).lower() == "yes" else 2
    score_5 = (0.3*years + 0.2*tech + 0.2*soft + 0.2*perf + 0.1*leadership)
    # Rescale the 0-5 weighted score to a percentage.
    return round(score_5 / 5 * 100, 1)
def suggest_next_role_rule(row):
    """Map a readiness score to a rule-based next-role suggestion.

    Bands: >= 80 leadership track, >= 60 mid-level in the current role,
    otherwise keep upskilling in place.
    """
    readiness = row["ReadinessScore"]
    if readiness < 60:
        return "Upskill Current Role"
    if readiness < 80:
        return f"Mid-level {row['CurrentRole']}"
    return "Team Lead / Scrum Master"
def suggest_actions(row):
    """Return a bulleted action list matched to the readiness band."""
    readiness = row["ReadinessScore"]
    if readiness >= 80:
        actions = ("Lead initiatives", "Mentor juniors", "Improve decision-making")
    elif readiness >= 60:
        actions = ("Improve technical skills", "Own a module", "Drive improvements")
    else:
        actions = ("Improve consistency", "Weekly mentoring", "Earn certifications")
    # One bullet per action, newline-separated for display via st.write.
    return "β’ " + "\nβ’ ".join(actions)
# ------------------------------------------------
# Sidebar File Input
# ------------------------------------------------
with st.sidebar:
    st.header("π Upload CSV or Use Sample")
    # An uploaded CSV takes precedence over the bundled sample data.
    file = st.file_uploader("Upload CSV", type=["csv"])
    use_sample = st.checkbox("Use Sample Data", value=True)
# ------------------------------------------------
# Load Data
# ------------------------------------------------
# Prefer an uploaded file; otherwise fall back to the bundled sample CSV.
if file:
    df_raw = pd.read_csv(file)
elif use_sample:
    df_raw = pd.read_csv("sample_data/team_members_sample.csv")
else:
    # Nothing to analyze — halt this Streamlit script run here.
    st.stop()
st.subheader("π₯ Input Data")
st.dataframe(df_raw, use_container_width=True)
# ------------------------------------------------
# Compute Columns
# ------------------------------------------------
# Work on a copy so the raw upload displayed above stays unmodified.
df = df_raw.copy()
# Row-wise enrichment: readiness score plus rule-based role/action suggestions.
df["ReadinessScore"] = df.apply(score, axis=1)
df["SuggestedNextRole"] = df.apply(suggest_next_role_rule, axis=1)
df["RecommendedActions"] = df.apply(suggest_actions, axis=1)
# ------------------------------------------------
# Summary Table
# ------------------------------------------------
st.markdown("## π Team Summary")
# Headline columns only; the full enriched table is available via the
# download button at the bottom of the page.
st.dataframe(
    df[["Name", "CurrentRole", "YearsExperience", "ReadinessScore", "SuggestedNextRole"]],
    use_container_width=True,
)
# ------------------------------------------------
# Dashboard Charts
# ------------------------------------------------
st.markdown("---")
st.markdown("## π Team Dashboard")
c1, c2 = st.columns(2)
with c1:
    # Per-person readiness, each bar labeled with its exact score.
    fig = px.bar(df, x="Name", y="ReadinessScore", text="ReadinessScore")
    st.plotly_chart(fig, use_container_width=True)
with c2:
    # Distribution of suggested next roles across the team.
    role_counts = df["SuggestedNextRole"].value_counts().reset_index()
    role_counts.columns = ["Role", "Count"]
    fig2 = px.pie(role_counts, names="Role", values="Count")
    st.plotly_chart(fig2, use_container_width=True)
# ------------------------------------------------
# Employee-Level AI Section (BOTTOM)
# ------------------------------------------------
st.markdown("---")
st.markdown("## π€ Employee-Level Recommendations & AI Plan")
# Pick a single employee; iloc[0] extracts their enriched row as a Series.
# NOTE(review): assumes Name values are unique — duplicates would silently
# show only the first match.
selected_name = st.selectbox("Select employee:", df["Name"].unique())
emp = df[df["Name"] == selected_name].iloc[0]
st.markdown(f"### {emp['Name']} β {emp['CurrentRole']}")
st.markdown("#### Readiness & Suggested Path (Rule-Based)")
st.write(f"**Readiness Score:** {emp['ReadinessScore']} / 100")
st.write(f"**Suggested Next Role:** {emp['SuggestedNextRole']}")
st.markdown("**Recommended Actions:**")
st.write(emp["RecommendedActions"])
# AI Button
if st.button("π Generate AI Next Role & 30-60-90 Plan"):
    with st.spinner("Generating AI recommendations for this employee..."):
        # Two sequential LLM calls: the suggested role feeds the plan prompt.
        next_role_ai = generate_next_role(emp["CurrentRole"])
        plan_ai = generate_llm_plan(emp["CurrentRole"], next_role_ai)
    st.markdown("### πΌ Predicted Next-Level Role (AI)")
    st.success(next_role_ai)
    st.markdown("### ποΈ 30-60-90 Day Career Plan (AI)")
    st.write(plan_ai)
# ------------------------------------------------
# Download CSV
# ------------------------------------------------
# DataFrame.to_csv returns the CSV text directly when no path/buffer is
# passed, so the intermediate StringIO buffer is unnecessary.
csv_text = df.to_csv(index=False)
st.download_button(
    "π₯ Download Full Results CSV",
    csv_text,
    "career_progression_results.csv",
    "text/csv",
)
|