Skip to content

Commit fd6e39c

Browse files
committed
Merge branch 'feature_555_replace_plotly' into feature_556_copy_base_and_common_functionality
2 parents 07cf5bd + aafffd9 commit fd6e39c

2 files changed

Lines changed: 35 additions & 116 deletions

File tree

.github/workflows/unit_tests.yaml

Lines changed: 19 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -1,6 +1,7 @@
11
# This workflow will install Python dependencies and run tests with the specified Python version
22
# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
3-
name: Python 3.12 tests
3+
name: Python Tests
4+
run-name: ${{ github.event_name == 'workflow_dispatch' && format('{0} vs. {1}', github.ref_name, github.event.inputs.version_to_compare) || github.event.head_commit.message || github.event.pull_request.title }}
45

56
on:
67
push:
@@ -33,7 +34,10 @@ jobs:
3334
runs-on: ubuntu-latest
3435
outputs:
3536
branches: ${{ steps.set-branches.outputs.branches }}
37+
python-versions: ${{ steps.set-py-versions.outputs.versions }}
3638
steps:
39+
- id: set-py-versions
40+
run: echo "versions=[\"3.12\", \"3.13\"]" >> $GITHUB_OUTPUT
3741
- id: set-branches
3842
run: |
3943
if [ "${{ github.event_name }}" == "pull_request" ]; then
@@ -51,7 +55,7 @@ jobs:
5155
strategy:
5256
fail-fast: false
5357
matrix:
54-
python-version: ["3.12"]
58+
python-version: ${{ fromJSON(needs.setup-matrix.outputs.python-versions) }}
5559
branch: ${{ fromJSON(needs.setup-matrix.outputs.branches) }}
5660

5761
steps:
@@ -75,6 +79,10 @@ jobs:
7579
- name: Install dependencies
7680
run: |
7781
python -m pip install --upgrade pip
82+
83+
# force pandas to be less than 3.0.0 for Python 3.12 to test for backward compatibility
84+
if [ ${{ matrix.python-version }} == "3.12" ]; then pip install "pandas<3"; fi
85+
7886
python -m pip install -e METcalcpy
7987
if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
8088
pip install --upgrade kaleido
@@ -87,12 +95,17 @@ jobs:
8795
- name: Upload output data artifact
8896
uses: actions/upload-artifact@v4
8997
with:
90-
name: test_output_${{ matrix.branch }}
98+
name: test_output_${{ matrix.branch }}_${{ matrix.python-version }}
9199
path: ${{ runner.workspace }}/output/${{ matrix.branch }}/test_output
92100

93101
compare-output:
94102
needs: [setup-matrix, build]
95103
runs-on: ubuntu-latest
104+
strategy:
105+
fail-fast: false
106+
matrix:
107+
python-version: ${{ fromJSON(needs.setup-matrix.outputs.python-versions) }}
108+
96109
# Only run if there are exactly 2 branches to compare
97110
if: fromJSON(needs.setup-matrix.outputs.branches)[1] != null
98111
steps:
@@ -111,7 +124,7 @@ jobs:
111124
sparse-checkout-cone-mode: false
112125
path: METplus
113126

114-
- name: Set up Python
127+
- name: Set up Python for diff_util.py
115128
uses: actions/setup-python@v5
116129
with:
117130
python-version: "3.12"
@@ -129,8 +142,8 @@ jobs:
129142
130143
# Define paths to the downloaded data
131144
# Note: upload-artifact v4 nests paths differently, usually named by 'name' provided in upload
132-
OUTPUT_DIR="${{ runner.workspace }}/artifacts/test_output_${OUTPUT_BRANCH}"
133-
TRUTH_DIR="${{ runner.workspace }}/artifacts/test_output_${TRUTH_BRANCH}"
145+
OUTPUT_DIR="${{ runner.workspace }}/artifacts/test_output_${OUTPUT_BRANCH}_${{ matrix.python-version }}"
146+
TRUTH_DIR="${{ runner.workspace }}/artifacts/test_output_${TRUTH_BRANCH}_${{ matrix.python-version }}"
134147
135148
echo "Comparing $OUTPUT_DIR and $TRUTH_DIR"
136149

test/skew_t/test_skew_t.py

Lines changed: 16 additions & 110 deletions
Original file line numberDiff line numberDiff line change
@@ -1,129 +1,35 @@
11
import pytest
22

33
import os
4-
import re
54

65
from metplotpy.plots.skew_t import skew_t as skew_t
76

87
def test_skew_t(module_setup_env):
8+
expected_times = {
9+
'2023010100': range(0, 61, 6),
10+
'2023010106': range(0, 49, 6),
11+
}
12+
expected_files = []
13+
for init, leads in expected_times.items():
14+
for lead in leads:
15+
expected_files.append(f'ssh052023_avno_doper_{init}_diag_{lead}_hr.png')
16+
917
custom_config_filename = os.path.join(os.environ['TEST_DIR'], "test_skew_t.yaml")
1018
skew_t.main(custom_config_filename)
1119

12-
# Verify that files for the ssh052023 data exists for the 0,6, 12,18,24, 30, 42,
13-
# 48, 54, and 60 hour data.
14-
output_dir = os.environ['TEST_OUTPUT']
20+
# Verify that files for the ssh052023 data exist for
21+
# the 0, 6, 12, 18, 24, 30, 36, 42, 48, 54, and 60 hour data.
22+
# Some of these data files have incomplete data so check for the expected hour plots.
1523

16-
# Some of these data files have incomplete data so check for the expected hour
17-
# plots.
18-
19-
print(f"Output dir: {output_dir}")
2024
file_ext = '.png'
2125
files_of_interest = []
22-
for root, _, files in os.walk(output_dir):
26+
for root, _, files in os.walk(os.environ['TEST_OUTPUT']):
2327
for item in files:
2428
if item.endswith(file_ext):
25-
# print(f"Item of interest: {item}")
2629
full_file = os.path.join(root, item)
2730
base_file = os.path.basename(full_file)
2831
files_of_interest.append(base_file)
2932

30-
_check_files_exist(files_of_interest)
31-
_check_files_not_created(files_of_interest)
32-
_check_empty_input(files_of_interest)
33-
34-
35-
def _check_files_exist(files_of_interest):
36-
'''
37-
Checking that only the expected plot files are getting created and
38-
input files with only fill/missing data are not created.
39-
'''
40-
# List of files for the sh052023 data (which is missing data for hours 66-240).
41-
# Config file is requesting all the available sounding hours
42-
data_some_missing_data = {
43-
'2023010100': range(0, 61, 6),
44-
'2023010106': range(0, 49, 6),
45-
}
46-
47-
# Create a list of expected base file names with their expected hours.
48-
expected_base_filenames = []
49-
# Expected base for expected plot output name of format:
50-
# ssh_052023_avno_doper_202301010[0|6]_diag_[0-9]{1,2}_hr
51-
for filetime, expected_hours in data_some_missing_data.items():
52-
for cur_hr in expected_hours:
53-
base_hr = f'ssh052023_avno_doper_{filetime}_diag_{cur_hr}_hr'
54-
expected_base_filenames.append(base_hr)
55-
56-
# Subset only the files that correspond to the sh052023 data
57-
subset_files_of_interest = []
58-
for cur_file in files_of_interest:
59-
match_found = re.match(r'(ssh052023_.*).png', cur_file)
60-
if match_found:
61-
subset_files_of_interest.append(match_found.group(1))
62-
63-
# Verify that the expected plots were generated.
64-
num_found = 0
65-
for expected in expected_base_filenames:
66-
if expected in subset_files_of_interest:
67-
num_found += 1
68-
69-
assert len(expected_base_filenames) == num_found
70-
71-
72-
def _check_files_not_created(files_of_interest):
73-
'''
74-
Checking that input files with only fill/missing data are not created.
75-
'''
76-
# List of files with no sounding data (9999 for all fields and times)
77-
no_sounding_data = ['ssh162023_avno_doper_2023022712_diag',
78-
'ssh162023_avno_doper_2023022800_diag',
79-
'ssh162023_avno_doper_2023022806_diag',
80-
'ssh162023_avno_doper_2023030706_diag']
81-
82-
# Subset the files of interest to just sh162023 output.
83-
subsetted_files_of_interest = []
84-
for cur in files_of_interest:
85-
match = re.match(r'^ssh162023', cur)
86-
if match:
87-
subsetted_files_of_interest.append(cur)
88-
89-
# Verify that there aren't any plots created for the files with missing sounding
90-
# data. First, create a list of the base names of the plots that were created and
91-
# that correspond to the input data of interest (i.e. the sh162023_*.dat data).
92-
subsetted_basenames = []
93-
for cur_plot in subsetted_files_of_interest:
94-
match = re.match(r'(ssh162023_avno_doper_20230[0-9]{5}_diag)_*._hr.png',
95-
cur_plot)
96-
if match:
97-
subsetted_basenames.append(match.group(1))
98-
99-
# Count how often we find a basename of a plot that we didn't expect to create with
100-
# the list of base names of plots that were created.
101-
fail_counter = 0
102-
for cur in no_sounding_data:
103-
if cur in subsetted_basenames:
104-
fail_counter += 1
105-
106-
assert fail_counter == 0
107-
108-
109-
def _check_empty_input(files_of_interest):
110-
'''
111-
Checking that empty input file is not creating any plots.
112-
'''
113-
# List of empty files
114-
no_data_empty_file = ['sal092022_avno_doper_2022092800_diag']
115-
116-
# Verify that there aren't any plots created for the file with missing sounding
117-
# data.
118-
119-
# First, subset the files of interest to just sal0920223 output.
120-
subsetted_files_of_interest = []
121-
for cur in files_of_interest:
122-
match = re.match(r'^sal092022', cur)
123-
if match:
124-
subsetted_files_of_interest.append(cur)
125-
126-
match_found = re.match(r'^sal092022_avno_doper_2022092800_diag',
127-
no_data_empty_file[0])
128-
# The output file was created when it shouldn't have been, fail.
129-
assert match_found not in subsetted_files_of_interest
33+
assert len(expected_files) == len(files_of_interest)
34+
for expected_file in expected_files:
35+
assert expected_file in files_of_interest

0 commit comments

Comments
 (0)