Sadjad Alikhani committed
Update app.py
app.py
CHANGED
@@ -176,8 +176,8 @@ def plot_confusion_matrix_from_csv(csv_file_path, title, save_path):
     return Image.open(save_path)
 
 # Function to load confusion matrix based on percentage and input_type
-def display_confusion_matrices_los(percentage_idx):
-    percentage = percentage_values_los[percentage_idx]
+def display_confusion_matrices_los(percentage):
+    #percentage = percentage_values_los[percentage_idx]
 
     # Construct folder names
     raw_folder = os.path.join(LOS_PATH, f"raw_{percentage/100:.3f}_los_noTraining")
@@ -200,13 +200,13 @@ def display_confusion_matrices_los(percentage_idx):
     return raw_img, embeddings_img
 
 # Main function to handle user choice
-def handle_user_choice(choice, percentage_idx=None, uploaded_file=None):
+def handle_user_choice(choice, percentage=None, uploaded_file=None):
     if choice == "Use Default Dataset":
-        raw_img, embeddings_img = display_confusion_matrices_los(percentage_idx)
+        raw_img, embeddings_img = display_confusion_matrices_los(percentage)
         return raw_img, embeddings_img, ""  # Return empty string for console output
     elif choice == "Upload Dataset":
         if uploaded_file is not None:
-            raw_img, embeddings_img, console_output = process_hdf5_file(uploaded_file, percentage_idx)
+            raw_img, embeddings_img, console_output = process_hdf5_file(uploaded_file, percentage)
             return raw_img, embeddings_img, console_output
         else:
             return "Please upload a dataset", "Please upload a dataset", ""  # Return empty string for console output
@@ -227,8 +227,8 @@ class PrintCapture(io.StringIO):
         return ''.join(self.output)
 
 # Function to load and display predefined images based on user selection
-def display_predefined_images(percentage_idx):
-    percentage = percentage_values_los[percentage_idx]
+def display_predefined_images(percentage):
+    #percentage = percentage_values_los[percentage_idx]
     raw_image_path = os.path.join(RAW_PATH, f"percentage_{percentage}_complexity_16.png")
     embeddings_image_path = os.path.join(EMBEDDINGS_PATH, f"percentage_{percentage}_complexity_16.png")
 
@@ -245,12 +245,12 @@ def display_predefined_images(percentage_idx):
 
     return raw_image, embeddings_image
 
-def los_nlos_classification(file, percentage_idx):
+def los_nlos_classification(file, percentage):
     if file is not None:
-        raw_cm_image, emb_cm_image, console_output = process_hdf5_file(file, percentage_idx)
+        raw_cm_image, emb_cm_image, console_output = process_hdf5_file(file, percentage)
         return raw_cm_image, emb_cm_image, console_output  # Returning all three: two images and console output
     else:
-        raw_image, embeddings_image = display_predefined_images(percentage_idx)
+        raw_image, embeddings_image = display_predefined_images(percentage)
         return raw_image, embeddings_image, ""  # Return an empty string for console output when no file is uploaded
 
 # Function to create random images for LoS/NLoS classification results
@@ -268,10 +268,10 @@ def load_module_from_path(module_name, file_path):
     return module
 
 # Function to split dataset into training and test sets based on user selection
-def split_dataset(channels, labels, percentage_idx):
-    percentage = percentage_values_los[percentage_idx] / 100
+def split_dataset(channels, labels, percentage):
+    #percentage = percentage_values_los[percentage_idx] / 100
     num_samples = channels.shape[0]
-    train_size = int(num_samples * percentage)
+    train_size = int(num_samples * percentage/100)
     print(f'Number of Training Samples: {train_size}')
 
     indices = np.arange(num_samples)
@@ -330,14 +330,15 @@ def plot_confusion_matrix(y_true, y_pred, title):
     plt.savefig(f"{title}.png")
     return Image.open(f"{title}.png")
 
-def identical_train_test_split(output_emb, output_raw, labels, percentage_idx):
+def identical_train_test_split(output_emb, output_raw, labels, percentage):
     N = output_emb.shape[0]  # Get the total number of samples
 
     # Generate the indices for shuffling and splitting
     indices = torch.randperm(N)  # Randomly shuffle the indices
 
     # Calculate the split index
-
+    #percentage = percentage_values_los[percentage_idx]
+    split_index = int(N * percentage/100)
     print(f'Training Size: {split_index}')
 
     # Split indices into train and test
@@ -359,7 +360,7 @@ def identical_train_test_split(output_emb, output_raw, labels, percentage_idx):
 # Store the original working directory when the app starts
 original_dir = os.getcwd()
 
-def process_hdf5_file(uploaded_file, percentage_idx):
+def process_hdf5_file(uploaded_file, percentage):
     capture = PrintCapture()
     sys.stdout = capture  # Redirect print statements to capture
 
@@ -418,12 +419,13 @@ def process_hdf5_file(uploaded_file, percentage_idx):
     print(f"Output Embeddings Shape: {output_emb.shape}")
     print(f"Output Raw Shape: {output_raw.shape}")
 
-    print(f'percentage_idx: {percentage_idx}')
-    print(f'percentage_value: {percentage_values_los[percentage_idx]}')
+    #print(f'percentage_idx: {percentage_idx}')
+    #print(f'percentage_value: {percentage_values_los[percentage_idx]}')
+    print(f'percentage_value: {percentage}')
     train_data_emb, test_data_emb, train_data_raw, test_data_raw, train_labels, test_labels = identical_train_test_split(output_emb.view(len(output_emb),-1),
                                                                                                                           output_raw.view(len(output_raw),-1),
                                                                                                                           labels,
-                                                                                                                          percentage_idx)
+                                                                                                                          percentage)
 
     # Step 8: Perform classification using the Euclidean distance for both raw and embeddings
     print(f'train_data_emb: {train_data_emb.shape}')
@@ -483,11 +485,11 @@ with gr.Blocks(css="""
         # Dropdown for selecting percentage for predefined data
         #percentage_dropdown_los = gr.Dropdown(choices=[f"{value:.3f}" for value in percentage_values_los], value=f"{percentage_values_los[0]:.3f}", label="Percentage of Data for Training")
         #percentage_dropdown_los = gr.Dropdown(choices=list(range(20)), value=0, label="Percentage of Data for Training")
-        percentage_slider_los = gr.Slider(minimum=float(percentage_values_los[0]),
-                                          maximum=float(percentage_values_los[-1]),
-                                          step=float(percentage_values_los[1] - percentage_values_los[0]),
-                                          value=float(percentage_values_los[0]),
-                                          label="Percentage of Data for Training")
+        #percentage_slider_los = gr.Slider(minimum=float(percentage_values_los[0]),
+        #                                  maximum=float(percentage_values_los[-1]),
+        #                                  step=float(percentage_values_los[1] - percentage_values_los[0]),
+        #                                  value=float(percentage_values_los[0]),
+        #                                  label="Percentage of Data for Training")
 
         # File uploader for dataset (only visible if user chooses to upload a dataset)
         file_input = gr.File(label="Upload HDF5 Dataset", file_types=[".h5"], visible=False)
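For reference, here is a minimal, self-contained sketch of the value-based split this commit moves to, mirroring the new split_index = int(N * percentage/100) line in identical_train_test_split. The function and variable names below are illustrative only and are not taken from app.py; the sketch assumes percentage is given in percent (e.g. 10.0 for 10%).

import torch

def percentage_split(features, labels, percentage):
    # Shuffle the sample indices, then keep the first `percentage` percent for training.
    n = features.shape[0]
    indices = torch.randperm(n)
    split_index = int(n * percentage / 100)
    train_idx, test_idx = indices[:split_index], indices[split_index:]
    return features[train_idx], features[test_idx], labels[train_idx], labels[test_idx]

# Illustrative usage with random data: percentage=10.0 keeps 10 of 100 samples for training.
x = torch.randn(100, 8)
y = torch.randint(0, 2, (100,))
x_train, x_test, y_train, y_test = percentage_split(x, y, percentage=10.0)
print(x_train.shape, x_test.shape)  # torch.Size([10, 8]) torch.Size([90, 8])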
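Since the commit leaves the percentage_slider_los block commented out, the UI wiring for the new value-based signature is not shown in this diff. The snippet below is a hypothetical sketch (not taken from app.py) of how a slider carrying the percentage itself could be passed straight to handle_user_choice; the component set, value range, and the stub handler are assumptions for illustration only.

import gradio as gr

# Stand-in for the app's handler: it now receives the percentage value directly,
# not an index into percentage_values_los.
def handle_user_choice(choice, percentage=None, uploaded_file=None):
    return f"choice={choice}, percentage={percentage}, file={uploaded_file}"

with gr.Blocks() as demo:
    choice = gr.Radio(["Use Default Dataset", "Upload Dataset"],
                      value="Use Default Dataset", label="Dataset")
    # The slider value is the training percentage itself, so no index lookup is needed.
    percentage_slider = gr.Slider(minimum=0.5, maximum=50.0, step=0.5, value=10.0,
                                  label="Percentage of Data for Training")
    file_input = gr.File(label="Upload HDF5 Dataset", file_types=[".h5"])
    run_button = gr.Button("Run")
    console = gr.Textbox(label="Console Output")
    run_button.click(handle_user_choice,
                     inputs=[choice, percentage_slider, file_input],
                     outputs=console)

demo.launch()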