# Team Career Progression Assistant — Streamlit app (Hugging Face Space)
from io import StringIO

import pandas as pd
import plotly.express as px
import streamlit as st
from huggingface_hub import InferenceClient
# ------------------------------------------------
# App Config
# ------------------------------------------------
# NOTE(review): the original emoji literals were mojibake-corrupted;
# replaced with plausible equivalents — confirm against the deployed UI.
st.set_page_config(page_title="Team Career Progression Assistant", layout="wide")
st.title("🧭 Team Career Progression Assistant")
st.caption("Program Managers • Scrum Masters • People Leads")
# ------------------------------------------------
# Load Hugging Face Model (OFFICIALLY SUPPORTED)
# ------------------------------------------------
@st.cache_resource
def load_hf_model() -> InferenceClient:
    """Return a cached InferenceClient for the hosted Gemma model.

    ``st.cache_resource`` keeps a single client per session instead of
    re-instantiating it on every Streamlit rerun.
    """
    # This model works with the HF Inference API on Spaces.
    return InferenceClient("google/gemma-1.1-2b-it")


hf = load_hf_model()
# ------------------------------------------------
# Safe text generation wrapper (Gemma-compatible)
# ------------------------------------------------
def safe_generate(prompt: str, tokens: int = 200):
    """Run text generation; return stripped text, or None on any failure.

    Failures are surfaced as a Streamlit warning instead of crashing
    the app (best-effort behavior).
    """
    try:
        raw = hf.text_generation(
            prompt,
            max_new_tokens=tokens,
            temperature=0.7,
            do_sample=True,
        )
    except Exception as e:
        st.warning(f"LLM error: {e}")
        return None
    # Some client versions return {"generated_text": ...}.
    if isinstance(raw, dict) and "generated_text" in raw:
        return raw["generated_text"].strip()
    # Most return a plain string.
    if isinstance(raw, str):
        return raw.strip()
    # Last-resort fallback for unexpected response types.
    return str(raw)
# ------------------------------------------------
# Next-Level Role (AI)
# ------------------------------------------------
def generate_next_role(current_role: str) -> str:
    """Ask the LLM for one next-level job title for *current_role*."""
    prompt = f"""
Suggest one realistic next-level job title for someone currently working as a {current_role}.
Return only the job title. Do not explain.
"""
    # Empty/None LLM output falls back to a fixed placeholder.
    return safe_generate(prompt, tokens=40) or "Unable to generate"
# ------------------------------------------------
# 30-60-90 Day Plan (AI)
# ------------------------------------------------
def generate_llm_plan(role: str, next_role: str) -> str:
    """Generate a markdown 30-60-90 day plan for growing from *role* into *next_role*."""
    prompt = f"""
Create a clear, structured 30-60-90 day development plan for someone currently
in the role {role} who wants to grow into the next-level role: {next_role}.
Use this format:
### 30-Day Goals
- ...
### 60-Day Goals
- ...
### 90-Day Goals
- ...
"""
    # Empty/None LLM output falls back to a fixed placeholder.
    return safe_generate(prompt, tokens=350) or "Unable to generate plan."
| # ------------------------------------------------ | |
| # Scoring Logic | |
| # ------------------------------------------------ | |
| def score(row): | |
| years = min(row["YearsExperience"], 10) / 10 * 5 | |
| tech = row["TechSkillRating"] | |
| soft = row["SoftSkillRating"] | |
| perf = row["PerformanceRating"] | |
| leadership = 5 if str(row["LeadershipInterest"]).lower() == "yes" else 2 | |
| score_5 = (0.3*years + 0.2*tech + 0.2*soft + 0.2*perf + 0.1*leadership) | |
| return round(score_5 / 5 * 100, 1) | |
def suggest_next_role_rule(row):
    """Map a readiness score to a rule-based next-role suggestion."""
    readiness = row["ReadinessScore"]
    if readiness >= 80:
        return "Team Lead / Scrum Master"
    if readiness >= 60:
        return f"Mid-level {row['CurrentRole']}"
    return "Upskill Current Role"
def suggest_actions(row):
    """Return a newline-separated bullet list of development actions.

    The action set is tiered by ReadinessScore (>=80, >=60, else).
    """
    if row["ReadinessScore"] >= 80:
        acts = ["Lead initiatives", "Mentor juniors", "Improve decision-making"]
    elif row["ReadinessScore"] >= 60:
        acts = ["Improve technical skills", "Own a module", "Drive improvements"]
    else:
        acts = ["Improve consistency", "Weekly mentoring", "Earn certifications"]
    # Encoding fix: the bullet character was mojibake in the original source.
    return "• " + "\n• ".join(acts)
# ------------------------------------------------
# Sidebar File Input
# ------------------------------------------------
# NOTE(review): original emoji were mojibake; replaced with plausible
# equivalents — confirm against the deployed UI.
with st.sidebar:
    st.header("📁 Upload CSV or Use Sample")
    file = st.file_uploader("Upload CSV", type=["csv"])
    use_sample = st.checkbox("Use Sample Data", value=True)

# ------------------------------------------------
# Load Data
# ------------------------------------------------
# Uploaded file wins; otherwise fall back to the bundled sample,
# otherwise halt the script (nothing to analyze).
if file:
    df_raw = pd.read_csv(file)
elif use_sample:
    df_raw = pd.read_csv("sample_data/team_members_sample.csv")
else:
    st.stop()

st.subheader("📥 Input Data")
st.dataframe(df_raw, use_container_width=True)
# ------------------------------------------------
# Compute Columns
# ------------------------------------------------
# Derive the three analysis columns row by row. Order matters:
# ReadinessScore must exist before the two suggestion columns read it.
df = df_raw.copy()
for column, builder in (
    ("ReadinessScore", score),
    ("SuggestedNextRole", suggest_next_role_rule),
    ("RecommendedActions", suggest_actions),
):
    df[column] = df.apply(builder, axis=1)
# ------------------------------------------------
# Summary Table
# ------------------------------------------------
# NOTE(review): original emoji were mojibake; replaced with plausible
# equivalents — confirm against the deployed UI.
st.markdown("## 📋 Team Summary")
st.dataframe(
    df[["Name", "CurrentRole", "YearsExperience", "ReadinessScore", "SuggestedNextRole"]],
    use_container_width=True,
)

# ------------------------------------------------
# Dashboard Charts
# ------------------------------------------------
st.markdown("---")
st.markdown("## 📈 Team Dashboard")

c1, c2 = st.columns(2)
with c1:
    # Per-member readiness, labelled with the exact score.
    fig = px.bar(df, x="Name", y="ReadinessScore", text="ReadinessScore")
    st.plotly_chart(fig, use_container_width=True)
with c2:
    # Distribution of suggested next roles across the team.
    role_counts = df["SuggestedNextRole"].value_counts().reset_index()
    role_counts.columns = ["Role", "Count"]
    fig2 = px.pie(role_counts, names="Role", values="Count")
    st.plotly_chart(fig2, use_container_width=True)
# ------------------------------------------------
# Employee-Level AI Section (BOTTOM)
# ------------------------------------------------
# NOTE(review): original emoji and the name/role separator (em dash) were
# mojibake; replaced with plausible equivalents — confirm against the UI.
st.markdown("---")
st.markdown("## 🤖 Employee-Level Recommendations & AI Plan")

selected_name = st.selectbox("Select employee:", df["Name"].unique())
emp = df[df["Name"] == selected_name].iloc[0]

st.markdown(f"### {emp['Name']} — {emp['CurrentRole']}")
st.markdown("#### Readiness & Suggested Path (Rule-Based)")
st.write(f"**Readiness Score:** {emp['ReadinessScore']} / 100")
st.write(f"**Suggested Next Role:** {emp['SuggestedNextRole']}")
st.markdown("**Recommended Actions:**")
st.write(emp["RecommendedActions"])

# AI Button
if st.button("🚀 Generate AI Next Role & 30-60-90 Plan"):
    with st.spinner("Generating AI recommendations for this employee..."):
        next_role_ai = generate_next_role(emp["CurrentRole"])
        plan_ai = generate_llm_plan(emp["CurrentRole"], next_role_ai)
    st.markdown("### 💼 Predicted Next-Level Role (AI)")
    st.success(next_role_ai)
    st.markdown("### 🗓️ 30-60-90 Day Career Plan (AI)")
    st.write(plan_ai)
# ------------------------------------------------
# Download CSV
# ------------------------------------------------
# Serialize the enriched DataFrame to an in-memory CSV for download.
# Encoding fix: the download-arrow emoji was mojibake (matches the 📥
# used for the Input Data section).
buf = StringIO()
df.to_csv(buf, index=False)
st.download_button(
    "📥 Download Full Results CSV",
    buf.getvalue(),
    "career_progression_results.csv",
    "text/csv",
)