
Weighted Accuracy Calculator (Python)

An essential tool for evaluating machine learning model performance, especially with imbalanced datasets.

Calculate Weighted Accuracy

  • True Positives (TP): number of correctly predicted positive instances.
  • True Negatives (TN): number of correctly predicted negative instances.
  • False Positives (FP): number of instances incorrectly predicted as positive (Type I error).
  • False Negatives (FN): number of instances incorrectly predicted as negative (Type II error).

Results

Weighted Accuracy = (Sensitivity + Specificity) / 2
Sensitivity (Recall)
Specificity
Overall Accuracy

Accuracy Metrics Comparison

Comparison of Weighted Accuracy, Overall Accuracy, Sensitivity, and Specificity.
Confusion Matrix and Derived Metrics
| Metric | Value | Formula |
| --- | --- | --- |
| True Positives (TP) | – | (input) |
| True Negatives (TN) | – | (input) |
| False Positives (FP) | – | (input) |
| False Negatives (FN) | – | (input) |
| Sensitivity (Recall) | – | TP / (TP + FN) |
| Specificity | – | TN / (TN + FP) |
| Overall Accuracy | – | (TP + TN) / Total |
| Weighted Accuracy | – | (Sensitivity + Specificity) / 2 |

What is Weighted Accuracy (Python)?

Weighted accuracy, in the context of machine learning and Python, is a metric used to evaluate the performance of classification models, particularly when dealing with imbalanced datasets. Unlike standard accuracy, which treats all misclassifications equally, weighted accuracy considers the relative importance or cost associated with different types of errors. In its simplest form, often called balanced accuracy, it is the mean of sensitivity and specificity, and it provides a more nuanced view than raw accuracy when class distributions are uneven.

The need for weighted accuracy arises frequently in real-world applications. For example, in medical diagnoses, a false negative (failing to detect a disease) might have far more severe consequences than a false positive (incorrectly diagnosing a healthy patient). In fraud detection, a false negative (missing a fraudulent transaction) is significantly more costly than a false positive (flagging a legitimate transaction). Python libraries like Scikit-learn provide tools to calculate various performance metrics, but understanding the underlying concepts, like weighted accuracy, is crucial for effective model evaluation and selection.

Who Should Use It?

  • Data scientists and machine learning engineers working with classification problems.
  • Anyone evaluating models on datasets where class imbalance is present.
  • Practitioners needing a performance metric that accounts for the differential cost of errors.

Common Misconceptions about Weighted Accuracy:

  • Misconception: Weighted accuracy is always the best metric.
    Reality: The choice of metric depends heavily on the specific problem and the costs associated with different errors. Other metrics like Precision, Recall, F1-score, or AUC might be more appropriate in certain scenarios.
  • Misconception: Weighted accuracy is complex to calculate.
    Reality: The core concept (average of sensitivity and specificity) is straightforward, and Python tools make implementation easy.
  • Misconception: Weighted accuracy is synonymous with precision or recall.
    Reality: While related, weighted accuracy specifically averages sensitivity (recall for the positive class) and specificity (recall for the negative class).

Weighted Accuracy (Python) Formula and Mathematical Explanation

The most common interpretation of weighted accuracy in a binary classification context is the average of the model's sensitivity and specificity. This metric is also often referred to as balanced accuracy.

Let's break down the components, starting with the confusion matrix:

  • True Positives (TP): The number of instances correctly predicted as positive.
  • True Negatives (TN): The number of instances correctly predicted as negative.
  • False Positives (FP): The number of instances incorrectly predicted as positive (predicted positive but actually negative; a Type I error).
  • False Negatives (FN): The number of instances incorrectly predicted as negative (predicted negative but actually positive; a Type II error).

From these, we derive:

  • Sensitivity (Recall or True Positive Rate): The proportion of actual positives that were correctly identified.
    Formula: Sensitivity = TP / (TP + FN)
  • Specificity (True Negative Rate): The proportion of actual negatives that were correctly identified.
    Formula: Specificity = TN / (TN + FP)

The Overall Accuracy metric is calculated as:

Overall Accuracy = (TP + TN) / (TP + TN + FP + FN)

Finally, the Weighted Accuracy (Balanced Accuracy) is the average of Sensitivity and Specificity:

Weighted Accuracy = (Sensitivity + Specificity) / 2

This formula gives equal weight to the performance on the positive and negative classes. If the dataset is perfectly balanced and the model performs equally well on both classes, the weighted accuracy will be similar to the overall accuracy. However, with imbalanced classes, it provides a more reliable picture of performance.
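As a minimal sketch, these formulas translate directly into a few lines of Python. The zero-denominator guards are an assumed convention for degenerate inputs (an empty class), and the sample counts are illustrative:

```python
# Minimal sketch: weighted (balanced) accuracy from confusion-matrix counts.
# The zero-denominator guards are an assumed convention for empty classes.

def weighted_accuracy(tp: int, tn: int, fp: int, fn: int) -> float:
    """Mean of sensitivity and specificity, a.k.a. balanced accuracy."""
    sensitivity = tp / (tp + fn) if tp + fn else 0.0   # TP / (TP + FN)
    specificity = tn / (tn + fp) if tn + fp else 0.0   # TN / (TN + FP)
    return (sensitivity + specificity) / 2

tp, tn, fp, fn = 85, 70, 15, 30            # illustrative counts
overall = (tp + tn) / (tp + tn + fp + fn)  # overall accuracy = 0.775
print(overall, weighted_accuracy(tp, tn, fp, fn))  # 0.775 0.7813...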

Variables Table

| Variable | Meaning | Unit | Typical Range |
| --- | --- | --- | --- |
| TP | True Positives | Count | ≥ 0 |
| TN | True Negatives | Count | ≥ 0 |
| FP | False Positives | Count | ≥ 0 |
| FN | False Negatives | Count | ≥ 0 |
| Sensitivity | True Positive Rate | Proportion | [0, 1] |
| Specificity | True Negative Rate | Proportion | [0, 1] |
| Overall Accuracy | Correct Predictions / Total Predictions | Proportion | [0, 1] |
| Weighted Accuracy | (Sensitivity + Specificity) / 2 | Proportion | [0, 1] |

Practical Examples (Real-World Use Cases)

Example 1: Medical Diagnosis (Imbalanced Dataset)

Consider a model designed to detect a rare disease. Out of 1000 patients tested:

  • 800 are healthy (Negative Class)
  • 200 have the disease (Positive Class)

The model correctly identifies 180 patients with the disease and misses 20.

It correctly identifies 750 healthy patients and incorrectly flags 50 healthy patients as having the disease.

Inputs:

  • True Positives (TP): 180
  • False Negatives (FN): 20 (200 actual positives − 180 true positives)
  • True Negatives (TN): 750
  • False Positives (FP): 50 (800 actual negatives − 750 true negatives)

Calculations:

  • Sensitivity = 180 / (180 + 20) = 180 / 200 = 0.90
  • Specificity = 750 / (750 + 50) = 750 / 800 = 0.9375
  • Overall Accuracy = (180 + 750) / 1000 = 930 / 1000 = 0.93
  • Weighted Accuracy = (0.90 + 0.9375) / 2 = 1.8375 / 2 = 0.91875

Interpretation: The overall accuracy is 93%, which sounds high. However, the weighted accuracy is 91.875%. While still good, it highlights that the model's performance on the negative class (Specificity) is slightly better than on the positive class (Sensitivity). This is important because the positive class (having the disease) is rarer, and detecting it correctly (high sensitivity) is crucial. The weighted accuracy gives a more balanced view than overall accuracy in this imbalanced scenario.
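As a cross-check in Python, a hedged sketch using scikit-learn: rebuild label arrays that match this confusion matrix (the arrays are a reconstruction for illustration, not the original patient data) and compare `accuracy_score` with `balanced_accuracy_score`:

```python
# Reproduce Example 1 with scikit-learn. Labels: 1 = has disease, 0 = healthy.
import numpy as np
from sklearn.metrics import accuracy_score, balanced_accuracy_score

# Rebuild label arrays matching the confusion matrix: TP=180, FN=20, TN=750, FP=50
y_true = np.concatenate([np.ones(200), np.zeros(800)])    # 200 sick, 800 healthy
y_pred = np.concatenate([np.ones(180), np.zeros(20),      # 180 TP, 20 FN
                         np.zeros(750), np.ones(50)])     # 750 TN, 50 FP

print(accuracy_score(y_true, y_pred))           # 0.93
print(balanced_accuracy_score(y_true, y_pred))  # 0.91875
```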

Example 2: Fraud Detection System

A financial institution uses a model to detect fraudulent transactions. Out of 5000 transactions:

  • 4800 are legitimate (Negative Class)
  • 200 are fraudulent (Positive Class)

The model correctly flags 150 fraudulent transactions but misses 50.

It correctly classifies 4700 legitimate transactions and incorrectly flags 100 legitimate transactions as fraudulent.

Inputs:

  • True Positives (TP): 150
  • False Negatives (FN): 50 (200 actual positives − 150 true positives)
  • True Negatives (TN): 4700
  • False Positives (FP): 100 (4800 actual negatives − 4700 true negatives)

Calculations:

  • Sensitivity = 150 / (150 + 50) = 150 / 200 = 0.75
  • Specificity = 4700 / (4700 + 100) = 4700 / 4800 = 0.9792
  • Overall Accuracy = (150 + 4700) / 5000 = 4850 / 5000 = 0.97
  • Weighted Accuracy = (0.75 + 0.9792) / 2 = 1.7292 / 2 = 0.8646

Interpretation: The overall accuracy of 97% might suggest excellent performance. However, the weighted accuracy drops significantly to 86.46%. This highlights a critical issue: while the model is very good at identifying legitimate transactions (high specificity), it struggles to detect actual fraud (lower sensitivity). Missing fraudulent transactions (FN) can be extremely costly. In this case, focusing solely on overall accuracy would be misleading, and improving the model's sensitivity would be a priority.
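This example can likewise be verified in Python. The sketch below (again with reconstructed label arrays) uses scikit-learn's `confusion_matrix`, whose `ravel()` on a binary problem yields the counts in the order (TN, FP, FN, TP):

```python
# Example 2: extract TP/TN/FP/FN from scikit-learn's confusion_matrix
# and apply the formulas by hand. Labels: 1 = fraud, 0 = legitimate.
import numpy as np
from sklearn.metrics import confusion_matrix

y_true = np.concatenate([np.ones(200), np.zeros(4800)])
y_pred = np.concatenate([np.ones(150), np.zeros(50),       # 150 TP, 50 FN
                         np.zeros(4700), np.ones(100)])    # 4700 TN, 100 FP

# For binary labels [0, 1], ravel() returns (tn, fp, fn, tp) in that order.
tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
sensitivity = tp / (tp + fn)             # 0.75
specificity = tn / (tn + fp)             # ~0.9792
print((sensitivity + specificity) / 2)   # ~0.8646
```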

How to Use This Weighted Accuracy Calculator

Our Weighted Accuracy Calculator is designed to be intuitive and provide immediate insights into your classification model's performance, especially for imbalanced datasets. Here's how to use it effectively:

  1. Gather Your Confusion Matrix Data: Before using the calculator, you need the four key values from your model's confusion matrix: True Positives (TP), True Negatives (TN), False Positives (FP), and False Negatives (FN).
  2. Input the Values: Enter these four numbers into the corresponding input fields: "True Positives (TP)", "True Negatives (TN)", "False Positives (FP)", and "False Negatives (FN)".
  3. Calculate: Click the "Calculate" button. The calculator will instantly process the inputs.
  4. Review the Results:
    • Primary Result (Weighted Accuracy): The most prominent value displayed is the Weighted Accuracy (Balanced Accuracy), presented in a large, highlighted format. This is your primary performance metric, especially for imbalanced classes.
    • Intermediate Values: You'll also see the calculated Sensitivity, Specificity, and Overall Accuracy. These provide context and allow for a more detailed analysis.
    • Chart: The dynamic chart visually compares the different accuracy metrics, making it easy to spot discrepancies between overall accuracy and balanced performance.
    • Table: The confusion matrix table provides a detailed breakdown of the input values, along with the formulas used to derive each metric.
  5. Interpret the Findings: Compare the Weighted Accuracy to the Overall Accuracy. A significant difference suggests class imbalance issues. Analyze the individual Sensitivity and Specificity values to understand where your model excels and where it struggles. For instance, if detecting the positive class is critical (e.g., disease detection), prioritize high Sensitivity, even if it slightly reduces Specificity.
  6. Reset or Copy: Use the "Reset" button to clear the fields and start over with new values. Use the "Copy Results" button to copy the key metrics and assumptions to your clipboard for reporting or documentation.

Decision-Making Guidance:

  • High Weighted Accuracy, Similar to Overall Accuracy: Your model performs well and consistently across classes, even with potential imbalance.
  • High Overall Accuracy, Lower Weighted Accuracy: Your model is performing much better on the majority class than the minority class. You need to focus on improving performance for the minority class (e.g., through resampling techniques, cost-sensitive learning, or feature engineering).
  • Low Sensitivity: The model is poor at identifying positive instances. Critical for problems like disease detection or fraud detection.
  • Low Specificity: The model is poor at identifying negative instances. Can lead to many false alarms in spam detection or security systems.

Use the insights gained from this calculator to guide your model improvement strategies and make informed decisions about model deployment.

Key Factors That Affect Weighted Accuracy Results

Several factors can influence the weighted accuracy (balanced accuracy) of a classification model. Understanding these is key to interpreting results and improving model performance:

  1. Class Imbalance: This is the most significant factor. When one class has far more samples than others, overall accuracy can be misleadingly high. Weighted accuracy is specifically designed to mitigate this by averaging performance across classes. A highly imbalanced dataset will almost always result in a lower weighted accuracy compared to overall accuracy if the model favors the majority class.
  2. Model Performance on Minority Class: Weighted accuracy directly reflects how well the model identifies the positive class (Sensitivity) and the negative class (Specificity). If the model struggles to correctly classify instances of the minority class (low Sensitivity if the positive class is the minority, or low Specificity if the negative class is), the weighted accuracy will suffer disproportionately compared to overall accuracy.
  3. Choice of Evaluation Metric: While weighted accuracy is valuable, it might not be the ultimate goal. Depending on the business objective, maximizing precision, recall (sensitivity), F1-score, or AUC might be more important. For instance, in a scenario where false positives are extremely detrimental (e.g., incorrectly diagnosing a severe disease leading to unnecessary treatment), a high specificity might be prioritized over a balanced score.
  4. Feature Engineering and Selection: The quality and relevance of the input features significantly impact a model's ability to distinguish between classes. Poor features lead to poor classification, affecting TP, TN, FP, and FN, and thus all derived metrics including weighted accuracy. Effective feature engineering can dramatically improve both sensitivity and specificity.
  5. Hyperparameter Tuning: Model hyperparameters (e.g., regularization strength in logistic regression, depth of trees in random forests) control the model's complexity and learning process. Improper tuning can lead to underfitting or overfitting, negatively impacting performance on both majority and minority classes, thereby reducing weighted accuracy.
  6. Data Quality and Noise: Errors, outliers, or noise in the training data can confuse the model, leading to misclassifications. This affects the accuracy of the confusion matrix components. If noise disproportionately affects the minority class, it can heavily skew the weighted accuracy downward.
  7. Threshold Selection (for probabilistic models): Many classification models output probabilities. The decision threshold (often defaulted to 0.5) determines the final class prediction. Adjusting this threshold can trade off sensitivity and specificity. While weighted accuracy averages these, understanding the underlying trade-off driven by the threshold is crucial for optimization based on specific error costs; the sketch after this list illustrates the trade-off.
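The sketch below sweeps a few thresholds over assumed synthetic scores (the score distributions are illustrative, not from a real model) to show how sensitivity, specificity, and weighted accuracy shift together:

```python
# Illustrative threshold sweep on synthetic scores (assumptions for demo only).
import numpy as np

rng = rng = np.random.default_rng(0)
# Fake probabilities: positives score higher on average than negatives.
pos_scores = rng.normal(0.7, 0.15, 200)   # 200 positive instances
neg_scores = rng.normal(0.4, 0.15, 800)   # 800 negative instances

for threshold in (0.3, 0.5, 0.7):
    tp = np.sum(pos_scores >= threshold)  # positives correctly flagged
    fn = 200 - tp
    tn = np.sum(neg_scores < threshold)   # negatives correctly passed
    fp = 800 - tn
    sens = tp / (tp + fn)
    spec = tn / (tn + fp)
    print(f"t={threshold}: sensitivity={sens:.3f}, "
          f"specificity={spec:.3f}, weighted acc={(sens + spec) / 2:.3f}")
```

Raising the threshold trades sensitivity for specificity (and vice versa); weighted accuracy summarizes each operating point with equal weight on both classes.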

Frequently Asked Questions (FAQ)

Q1: What is the difference between Overall Accuracy and Weighted Accuracy?
A1: Overall Accuracy calculates the total correct predictions (TP + TN) divided by all predictions. Weighted Accuracy, often called Balanced Accuracy, is the average of Sensitivity (Recall) and Specificity. It's more reliable for imbalanced datasets because it equally considers performance on both positive and negative classes.
Q2: When should I use Weighted Accuracy over Overall Accuracy?
A2: You should strongly consider Weighted Accuracy when your dataset has a significant class imbalance (e.g., one class has 10x more samples than the other). In such cases, Overall Accuracy can be artificially inflated by correctly predicting the majority class, masking poor performance on the minority class.
Q3: Is Weighted Accuracy the same as the F1-Score?
A3: No. Weighted Accuracy is the average of Sensitivity and Specificity. The F1-Score is the harmonic mean of Precision and Sensitivity (Recall). While both are useful for imbalanced data, they measure different aspects of performance. The F1-Score focuses on the positive class (it ignores true negatives entirely), while Weighted Accuracy balances performance on the positive and negative classes.
Q4: Can Weighted Accuracy be 100%?
A4: Yes. Weighted Accuracy can be 100% (or 1.0) if both Sensitivity and Specificity are 100%. This means the model perfectly identifies all positive instances and all negative instances without any errors.
Q5: My weighted accuracy is much lower than my overall accuracy. What does this mean?
A5: This typically indicates that your model performs significantly better on the majority class than on the minority class. The overall accuracy is being boosted by high performance on the abundant class, while the weighted accuracy reveals poor performance on the scarce class. You should investigate why the minority class is being misclassified.
Q6: How does Python's Scikit-learn calculate balanced_accuracy_score?
A6: Scikit-learn's `balanced_accuracy_score` function computes exactly what we've described: the average of recall obtained on each class. For binary classification, this is equivalent to `(Sensitivity + Specificity) / 2`.
Q7: What if I have more than two classes (multi-class classification)?
A7: For multi-class problems, "weighted accuracy" can have different interpretations. A common approach is to compute recall for each class individually and then average the results, either unweighted (as in balanced accuracy) or weighted by class support (the number of true instances of each label). Scikit-learn's `balanced_accuracy_score` handles the multi-class case by averaging the recall obtained on each class.
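As a small illustration of the last two answers, the following sketch (toy labels, assumed purely for demonstration) shows that `balanced_accuracy_score` matches the unweighted mean of per-class recall:

```python
# balanced_accuracy_score equals the unweighted mean of per-class recall,
# shown on a small three-class toy example.
from sklearn.metrics import balanced_accuracy_score, recall_score

y_true = [0, 0, 0, 1, 1, 2, 2, 2, 2]
y_pred = [0, 0, 1, 1, 1, 2, 2, 0, 2]

per_class_recall = recall_score(y_true, y_pred, average=None)
print(per_class_recall)                          # recall for classes 0, 1, 2
print(per_class_recall.mean())                   # mean of per-class recall
print(balanced_accuracy_score(y_true, y_pred))   # same value
```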
Q8: Should I always aim for the highest possible Weighted Accuracy?
A8: Not necessarily. The "best" metric depends on the specific application's goals and the costs of different types of errors. If, for example, false negatives are far more costly than false positives, you might prioritize sensitivity even if it lowers the overall weighted accuracy slightly. Always align your metric choice with your business or project objectives.
