param2004 committed on
Commit bfade87 · verified · 1 Parent(s): 690bcb6

Update src/utils.py

Files changed (1)
  1. src/utils.py +14 -24
src/utils.py CHANGED
@@ -1,13 +1,13 @@
 import streamlit as st
 import pickle
 import pandas as pd
-from sentence_transformers import SentenceTransformer, models
-import torch
 import numpy as np
-from src.search import init_faiss
+import torch
+from sentence_transformers import SentenceTransformer, models
 from huggingface_hub import hf_hub_download
+from src.search import init_faiss
 
-# Repo IDs
+# Hugging Face repo IDs
 DATASET_REPO = "param2004/Medilingua-dataset"
 MODEL_REPO = "param2004/Medilingua-model"
 
@@ -15,47 +15,36 @@ MODEL_REPO = "param2004/Medilingua-model"
 def load_model():
     """Load SapBERT dynamically from Hugging Face Hub"""
     device = 'cuda' if torch.cuda.is_available() else 'cpu'
-
     st.info(f"🔬 Loading SapBERT from Hugging Face Hub on {device.upper()}...")
 
-    # Download model files dynamically
     try:
+        # Download model files from Hub
         model_path = hf_hub_download(
            repo_id=MODEL_REPO,
            filename="models/SapBERT-from-PubMedBERT-fulltext/pytorch_model.bin"
        )
 
-        # Load SentenceTransformer as before
+        # Build SentenceTransformer manually
         word_embedding_model = models.Transformer(model_path)
         pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
         model = SentenceTransformer(modules=[word_embedding_model, pooling_model], device=device)
 
-        st.success("✅ SapBERT loaded successfully from Hub.")
+        st.success("✅ SapBERT loaded successfully from Hugging Face Hub.")
     except Exception as e:
-        st.error(f"❌ Failed to load SapBERT from Hub. Details: {e}")
+        st.error(f"❌ Failed to load SapBERT from Hub: {e}")
         st.warning("⚠️ Falling back to 'all-MiniLM-L6-v2' model.")
         model = SentenceTransformer('all-MiniLM-L6-v2', device=device)
 
     return model
 
-
 @st.cache_resource
 def load_data():
     """Load embeddings and dataset dynamically from Hugging Face Hub"""
     try:
-        # Download embeddings
-        question_emb_path = hf_hub_download(
-            repo_id=DATASET_REPO,
-            filename="dataset/question_embeddings.pkl"
-        )
-        doctor_emb_path = hf_hub_download(
-            repo_id=DATASET_REPO,
-            filename="dataset/doctor_embeddings.pkl"
-        )
-        dataset_csv_path = hf_hub_download(
-            repo_id=DATASET_REPO,
-            filename="dataset/dataset.csv"
-        )
+        # Download embeddings & CSV from Hub
+        question_emb_path = hf_hub_download(DATASET_REPO, filename="dataset/question_embeddings.pkl")
+        doctor_emb_path = hf_hub_download(DATASET_REPO, filename="dataset/doctor_embeddings.pkl")
+        dataset_csv_path = hf_hub_download(DATASET_REPO, filename="dataset/dataset.csv")
 
         # Load embeddings
         with open(question_emb_path, 'rb') as f:
@@ -66,11 +55,12 @@ def load_data():
         doctor_data = pickle.load(f)
         doctor_embeddings = doctor_data.get('embeddings').astype('float32')
 
-        # Load CSV
+        # Load dataset CSV
         df = pd.read_csv(dataset_csv_path)
         df.dropna(subset=['Description', 'Patient', 'Doctor'], inplace=True)
         df.drop_duplicates(inplace=True)
 
+        # Ensure all arrays align
         num_samples = min(len(df), len(question_embeddings), len(doctor_embeddings))
         df = df.iloc[:num_samples]
         question_embeddings = question_embeddings[:num_samples]
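
For context, load_model() builds the SentenceTransformer by hand from a Transformer backbone plus a pooling layer. Below is a minimal standalone sketch of that pattern. Note that models.Transformer expects a model directory or repo ID rather than a single pytorch_model.bin, so the sketch fetches the whole model folder with snapshot_download; the folder layout is assumed from the filename in the diff.

import torch
from huggingface_hub import snapshot_download
from sentence_transformers import SentenceTransformer, models

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# Fetch the full model directory from the Hub (cached locally after the first call).
model_dir = snapshot_download(
    repo_id="param2004/Medilingua-model",
    allow_patterns="models/SapBERT-from-PubMedBERT-fulltext/*",
)
model_path = f"{model_dir}/models/SapBERT-from-PubMedBERT-fulltext"

# Transformer backbone + mean pooling, wrapped as a SentenceTransformer.
word_embedding_model = models.Transformer(model_path)
pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension())
model = SentenceTransformer(modules=[word_embedding_model, pooling_model], device=device)

embedding = model.encode(["patient reports chest pain"], convert_to_numpy=True)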
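
And a sketch of how the two loaders would fit together with init_faiss from src/search.py. The index construction and the return shape of load_data() are assumptions for illustration, not the actual contents of src/search.py.

import faiss  # assumed dependency of src.search

def init_faiss(embeddings):
    # Hypothetical stand-in for src.search.init_faiss: a flat
    # inner-product index over float32 embeddings.
    index = faiss.IndexFlatIP(embeddings.shape[1])
    index.add(embeddings)
    return index

model = load_model()
df, question_embeddings, doctor_embeddings = load_data()  # assumed return shape
index = init_faiss(question_embeddings)

# Embed a query and retrieve the 5 nearest stored questions.
query = model.encode(["persistent headache for two weeks"], convert_to_numpy=True).astype('float32')
scores, ids = index.search(query, 5)
print(df.iloc[ids[0]][['Patient', 'Doctor']])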