Skip to content

API Reference: Utils

General Utilities

general

MHRQI — Multi-scale Hierarchical Representation of Quantum Images.
Utility Functions: Encoding, Reconstruction, Sibling Smoothing.
Author: Keno S. Jose — License: Apache 2.0

angle_map(img, bit_depth=8)

Map pixel intensities to quantum rotation angles via arcsin encoding.

Parameters:

Name Type Description Default
img

Grayscale image as integer array.

required
bit_depth

Bit depth of the image (default 8).

8

Returns:

Type Description

Array of angles in [0, π].

Source code in mhrqi/utils/general.py
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
def angle_map(img, bit_depth=8):
    """
    Map pixel intensities to quantum rotation angles via arcsin encoding.

    Args:
        img: Grayscale image as integer array.
        bit_depth: Bit depth of the image (default 8).

    Returns:
        Array of angles in [0, π].
    """
    # Normalize intensities to [0, 1] using the maximum representable value.
    peak = (1 << bit_depth) - 1
    normalized = np.clip(img.astype(np.float64) / peak, 0.0, 1.0)
    # theta = 2 * arcsin(sqrt(u)) maps u in [0, 1] onto angles in [0, pi].
    return 2.0 * np.arcsin(np.sqrt(normalized))

compose_rc(hierarchical_coord_vector, d=2)

Convert a hierarchical coordinate vector to (row, col) pixel coordinates.

Parameters:

Name Type Description Default
hierarchical_coord_vector

Sequence of qudit values (qy0, qx0, qy1, qx1, ...). Length must be even.

required
d

Qudit dimension.

2

Returns:

Type Description

Tuple (r, c).

Raises:

Type Description
ValueError

If hierarchical_coord_vector length is odd or any digit is out of range.

Source code in mhrqi/utils/general.py
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
def compose_rc(hierarchical_coord_vector, d=2):
    """
    Convert a hierarchical coordinate vector to (row, col) pixel coordinates.

    Args:
        hierarchical_coord_vector: Sequence of qudit values (qy0, qx0, qy1, qx1, ...). Length must be even.
        d: Qudit dimension.

    Returns:
        Tuple (r, c).

    Raises:
        ValueError: If hierarchical_coord_vector length is odd or any digit is out of range.
    """
    if len(hierarchical_coord_vector) % 2:
        raise ValueError("hierarchical_coord_vector length must be even (pairs of qy,qx).")

    # Interleaved digits: even indices encode the row, odd indices the column.
    r = 0
    for digit in hierarchical_coord_vector[0::2]:
        if not (0 <= digit < d):
            raise ValueError("qy digit out of range for given d.")
        r = r * d + int(digit)

    c = 0
    for digit in hierarchical_coord_vector[1::2]:
        if not (0 <= digit < d):
            raise ValueError("qx digit out of range for given d.")
        c = c * d + int(digit)

    return r, c

compute_register(r, c, d, sk_prev)

Compute the qudit register values (qy, qx) for pixel (r, c) at a given scale.

Parameters:

Name Type Description Default
r

Row index.

required
c

Column index.

required
d

Qudit dimension.

required
sk_prev

Subdivision size at the previous level.

required

Returns:

Type Description

Tuple (qy, qx).

Source code in mhrqi/utils/general.py
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
def compute_register(r, c, d, sk_prev):
    """
    Compute the qudit register values (qy, qx) for pixel (r, c) at a given scale.

    Args:
        r: Row index.
        c: Column index.
        d: Qudit dimension.
        sk_prev: Subdivision size at the previous level.

    Returns:
        Tuple (qy, qx).
    """
    # Position inside the parent cell, rescaled to a digit in [0, d-1].
    scale = d / sk_prev
    qy = math.floor((r % sk_prev) * scale)
    qx = math.floor((c % sk_prev) * scale)
    # Clamp to guard against floating-point rounding at the upper edge.
    return min(qy, d - 1), min(qx, d - 1)

generate_hierarchical_coord_matrix(N, d=2)

Generate the hierarchical coordinate matrix for an image of size N x N.

Parameters:

Name Type Description Default
N

Image side length.

required
d

Qudit dimension.

2

Returns:

Type Description

List of hierarchical coordinate vectors.

Source code in mhrqi/utils/general.py
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
def generate_hierarchical_coord_matrix(N, d=2):
    """
    Generate the hierarchical coordinate matrix for an image of size N x N.

    Args:
        N: Image side length.
        d: Qudit dimension.

    Returns:
        List of hierarchical coordinate vectors.
    """
    # Subdivision size for each hierarchy level; level 0 spans the whole image.
    subdiv_sizes = [
        N if level == 0 else get_subdiv_size(level, N, d)
        for level in range(get_max_depth(N, d))
    ]

    hierarchical_coord_matrix = []
    for r, c in np.ndindex(N, N):
        # Concatenate the (qy, qx) register pair from every level.
        vector = []
        for size in subdiv_sizes:
            vector.extend(compute_register(r, c, d, size))
        hierarchical_coord_matrix.append(vector)
    return hierarchical_coord_matrix

get_max_depth(N, d)

Compute the maximum hierarchy depth for image size N and qudit dimension d.

Parameters:

Name Type Description Default
N

Image side length.

required
d

Qudit dimension.

required

Returns:

Type Description

max_depth = floor(log_d(N)).

Source code in mhrqi/utils/general.py
33
34
35
36
37
38
39
40
41
42
43
44
45
def get_max_depth(N, d):
    """
    Compute the maximum hierarchy depth for image size N and qudit dimension d.

    Uses exact integer arithmetic instead of floor(math.log(N, d)): the float
    logarithm can under-count by one at exact powers due to rounding
    (e.g. math.log(1000, 10) == 2.999... so floor gives 2 instead of 3).

    Args:
        N: Image side length.
        d: Qudit dimension (must be >= 2).

    Returns:
        max_depth = floor(log_d(N)).
    """
    # Count how many times d fits multiplicatively into N.
    max_depth = 0
    power = d
    while power <= N:
        power *= d
        max_depth += 1
    return max_depth

get_subdiv_size(k, N, d)

Compute the subdivision size at hierarchy level k.

Parameters:

Name Type Description Default
k

Hierarchy level.

required
N

Image side length.

required
d

Qudit dimension.

required

Returns:

Type Description

Side length of subregions at level k.

Source code in mhrqi/utils/general.py
48
49
50
51
52
53
54
55
56
57
58
59
60
61
def get_subdiv_size(k, N, d):
    """
    Compute the subdivision size at hierarchy level k.

    Args:
        k: Hierarchy level.
        N: Image side length.
        d: Qudit dimension.

    Returns:
        Side length of subregions at level k (N / d**k, as a float).
    """
    # Each level splits every side by a factor of d.
    return N / (d**k)

mhrqi_bins_to_image(bins, hierarchical_coord_matrix, d, image_shape, bias_stats=None, original_img=None)

Reconstruct an image from measurement bins with optional confidence-weighted smoothing.

When bias_stats is provided, each pixel is blended with its 8-neighborhood weighted by its denoiser confidence. Neighbors with confidence below CONFIDENCE_THRESHOLD are used as context; high-confidence pixels are trusted as-is.

Parameters:

Name Type Description Default
bins

Measurement bins dict mapping position tuples to intensity stats.

required
hierarchical_coord_matrix

List of hierarchical coordinate vectors.

required
d

Qudit dimension.

required
image_shape

Output image shape as (H, W).

required
bias_stats

Optional dict mapping position tuples to hit/miss counts.

None
original_img

Optional pre-computed baseline image to use as source.

None

Returns:

Type Description

Reconstructed image as a float array of shape image_shape.

Source code in mhrqi/utils/general.py
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
def mhrqi_bins_to_image(
    bins, hierarchical_coord_matrix, d, image_shape, bias_stats=None, original_img=None
):
    """
    Reconstruct an image from measurement bins with optional confidence-weighted smoothing.

    When bias_stats is provided, each pixel is blended with its 8-neighborhood
    weighted by its denoiser confidence. Neighbors with confidence below
    CONFIDENCE_THRESHOLD are used as context; high-confidence pixels are
    trusted as-is.

    Args:
        bins: Measurement bins dict mapping position tuples to intensity stats.
        hierarchical_coord_matrix: List of hierarchical coordinate vectors.
        d: Qudit dimension.
        image_shape: Output image shape as (H, W).
        bias_stats: Optional dict mapping position tuples to hit/miss counts.
        original_img: Optional pre-computed baseline image to use as source.

    Returns:
        Reconstructed image as a float array of shape image_shape.
    """
    img = np.zeros(image_shape)
    # NOTE(review): neighbor bounds below use image_shape[0] for BOTH axes,
    # so smoothing implicitly assumes a square image — confirm callers pass H == W.
    N = image_shape[0]

    # Pass 1: baseline reconstruction — mean measured intensity per pixel.
    reconstructed_baseline = np.zeros(image_shape)
    for hierarchical_coord_vector in hierarchical_coord_matrix:
        key = tuple(hierarchical_coord_vector)
        if key in bins and bins[key].get("count", 0) > 0:
            avg_intensity = bins[key]["intensity_sum"] / bins[key]["count"]
            r, c = compose_rc(hierarchical_coord_vector, d)
            reconstructed_baseline[r, c] = avg_intensity

    # Caller-provided baseline takes precedence over the reconstruction.
    source_img = original_img if original_img is not None else reconstructed_baseline

    # No bias statistics -> no smoothing; return the source as-is.
    if bias_stats is None:
        return source_img

    # Neighbors at or below this hit-rate are used as smoothing context.
    CONFIDENCE_THRESHOLD = 0.7

    # Pass 2: per-pixel confidence = hit / (hit + miss); 0.5 when unknown.
    confidence_map = np.ones(image_shape) * 0.5
    for hierarchical_coord_vector in hierarchical_coord_matrix:
        key = tuple(hierarchical_coord_vector)
        r, c = compose_rc(hierarchical_coord_vector, d)
        if key in bias_stats:
            hit = bias_stats[key].get("hit", 0)
            miss = bias_stats[key].get("miss", 0)
            total = hit + miss
            confidence_map[r, c] = hit / total if total > 0 else 0.5

    # Pass 3: blend each pixel with the median of its low-confidence
    # 8-neighborhood, weighted by the pixel's own confidence.
    for hierarchical_coord_vector in hierarchical_coord_matrix:
        key = tuple(hierarchical_coord_vector)
        r, c = compose_rc(hierarchical_coord_vector, d)
        confidence = confidence_map[r, c]

        # Gather in-bounds neighbors whose confidence is at or below the threshold.
        trusted_neighbor_vals = []
        for dr in [-1, 0, 1]:
            for dc in [-1, 0, 1]:
                if dr == 0 and dc == 0:
                    continue
                nr, nc = r + dr, c + dc
                if 0 <= nr < N and 0 <= nc < N and confidence_map[nr, nc] <= CONFIDENCE_THRESHOLD:
                    trusted_neighbor_vals.append(source_img[nr, nc])

        # Fallback: if no neighbor qualified, use the full 8-neighborhood.
        if len(trusted_neighbor_vals) == 0:
            for dr in [-1, 0, 1]:
                for dc in [-1, 0, 1]:
                    if dr == 0 and dc == 0:
                        continue
                    nr, nc = r + dr, c + dc
                    if 0 <= nr < N and 0 <= nc < N:
                        trusted_neighbor_vals.append(source_img[nr, nc])

        # Median is robust to outlier neighbors; with no neighbors at all,
        # fall back to the pixel's own value.
        context_avg = (
            np.median(trusted_neighbor_vals) if trusted_neighbor_vals else source_img[r, c]
        )

        # Convex blend: trust the pixel in proportion to its confidence.
        img[r, c] = (confidence * source_img[r, c]) + ((1 - confidence) * context_avg)

    return img

Visualization

visualization

MHRQI — Multi-scale Hierarchical Representation of Quantum Images.
Plotting and Metrics: Visualization, Quality Assessment, Benchmarking.
Author: Keno S. Jose — License: Apache 2.0

ImagePlotter

Utilities for displaying and comparing images.

Source code in mhrqi/utils/visualization.py
904
905
906
907
908
909
910
class ImagePlotter:
    """Utilities for displaying and comparing images."""

    # Thin namespace: re-exports module-level plotting helpers as static
    # methods so callers can write ImagePlotter.<fn>(...) without importing
    # the individual functions.
    show_image_comparison = staticmethod(show_image_comparison)
    plot_mse_map = staticmethod(plot_mse_map)
    grid_to_image_uint8 = staticmethod(grid_to_image_uint8)
    bins_to_image = staticmethod(bins_to_image)

MetricsPlotter

Visualization for image quality metrics and comparison reports.

Source code in mhrqi/utils/visualization.py
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
class MetricsPlotter:
    """Visualization for image quality metrics and comparison reports."""

    @staticmethod
    def print_summary_text(competitors, keys, title):
        """Print a formatted metric table to stdout."""
        print("-" * 100)
        print(f" {title}")
        print("-" * 100)
        # Fixed-width columns: 12 chars for the method name, 15 per metric.
        header = f"{'Method':<12}" + "".join([f"{k:<15}" for k in keys])
        print(header)
        print("-" * 100)
        for m in competitors:
            row = f"{m['name']:<12}"
            for k in keys:
                # Missing metrics render as 'nan' instead of raising KeyError.
                val = m["metrics"].get(k, float("nan"))
                row += f"{val:<15.4f}"
            print(row)
        print("-" * 100)
        print()

    @staticmethod
    def save_summary_report(
        ref_img,
        competitors,
        metric_keys,
        title,
        filename_suffix,
        save_dir,
        include_original_in_table=False,
    ):
        """
        Generate and save a unified figure with images and a ranked metrics table.

        Args:
            ref_img: Reference image (uint8) or None.
            competitors: List of dicts with 'name', 'metrics', 'image' keys.
            metric_keys: List of metric names to display.
            title: Figure title.
            filename_suffix: Output filename without extension.
            save_dir: Directory to save the figure.
            include_original_in_table: Whether to include 'Original' in the table.
        """
        if include_original_in_table:
            table_methods = competitors
        else:
            table_methods = [c for c in competitors if c["name"] != "Original"]

        # Nothing to rank or plot.
        if not table_methods:
            return

        data_map = {m["name"]: m["metrics"] for m in table_methods}
        names = [m["name"] for m in table_methods]

        # Metrics where larger values rank better; all others rank ascending.
        higher_better = {"OMQDI", "EPF", "ENL", "EPI", "CNR", "NSF"}
        ranks = {k: {} for k in metric_keys}

        for k in metric_keys:
            is_higher = k in higher_better
            valid_items = [(name, data_map[name].get(k, float("nan"))) for name in names]
            valid_items = [x for x in valid_items if not np.isnan(x[1])]
            valid_items.sort(key=lambda x: x[1], reverse=is_higher)

            # Competition ranking: tied values share the earlier rank.
            current_rank = 1
            prev_val = None
            for i, (name, val) in enumerate(valid_items):
                if prev_val is not None and val == prev_val:
                    ranks[k][name] = current_rank
                else:
                    current_rank = i + 1
                    ranks[k][name] = current_rank
                prev_val = val

        has_ref_plot = ref_img is not None
        n_imgs = len(table_methods) + (1 if has_ref_plot else 0)

        # Widen the figure with the thumbnail count (~2.5 in per image).
        fig_width = max(10, n_imgs * 2.5)
        fig_height = 6

        fig = plt.figure(figsize=(fig_width, fig_height))

        # Row 0: image thumbnails; row 1: the metrics table.
        gs = fig.add_gridspec(2, n_imgs, height_ratios=[1, 1], hspace=0.1)

        col_idx = 0

        if has_ref_plot:
            ax_ref = fig.add_subplot(gs[0, col_idx])
            ax_ref.imshow(ref_img, cmap="gray", vmin=0, vmax=255)
            ax_ref.set_title("Reference", fontsize=10, fontweight="bold")
            ax_ref.set_xticks([])
            ax_ref.set_yticks([])
            col_idx += 1

        for m in table_methods:
            ax = fig.add_subplot(gs[0, col_idx])
            ax.imshow(m["image"], cmap="gray", vmin=0, vmax=255)
            ax.set_title(m["name"], fontsize=10)
            ax.set_xticks([])
            ax.set_yticks([])
            col_idx += 1

        # The table spans the full width of the bottom row.
        ax_table = fig.add_subplot(gs[1, :])
        ax_table.axis("off")

        cell_text = []
        for name in names:
            row = []
            for k in metric_keys:
                val = data_map[name].get(k, float("nan"))
                if np.isnan(val):
                    row.append("N/A")
                else:
                    # Append the rank to the value, e.g. "0.1234 (#1)".
                    r = ranks[k].get(name, "")
                    rank_str = f" (#{r})" if r else ""
                    row.append(f"{val:.4f}{rank_str}")
            cell_text.append(row)

        table = ax_table.table(
            cellText=cell_text,
            rowLabels=names,
            colLabels=metric_keys,
            loc="center",
            cellLoc="center",
        )
        table.auto_set_font_size(False)
        table.set_fontsize(10)
        table.scale(1.0, 1.5)

        # Header row: white-on-blue, bold.
        for j in range(len(metric_keys)):
            table[(0, j)].set_facecolor("#4472C4")
            table[(0, j)].set_text_props(color="white", weight="bold")

        # Highlight the best (#1) cell of each metric in green.
        for i, name in enumerate(names):
            for j, k in enumerate(metric_keys):
                r = ranks[k].get(name, None)
                if r == 1:
                    table[(i + 1, j)].set_facecolor("#C6EFCE")
                    table[(i + 1, j)].set_text_props(weight="bold")

        fig.suptitle(title, fontsize=14, fontweight="bold", y=0.95)

        if save_dir:
            os.makedirs(save_dir, exist_ok=True)
            out_path = os.path.join(save_dir, f"{filename_suffix}.png")
            plt.savefig(out_path, dpi=150, bbox_inches="tight")
        # Close to release figure memory even when nothing was saved.
        plt.close(fig)

print_summary_text(competitors, keys, title) staticmethod

Print a formatted metric table to stdout.

Source code in mhrqi/utils/visualization.py
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
@staticmethod
def print_summary_text(competitors, keys, title):
    """Print a formatted metric table to stdout."""
    rule = "-" * 100
    print(rule)
    print(f" {title}")
    print(rule)
    # Fixed-width columns: 12 chars for the method name, 15 per metric key.
    print(f"{'Method':<12}" + "".join(f"{k:<15}" for k in keys))
    print(rule)
    for entry in competitors:
        line = f"{entry['name']:<12}"
        for key in keys:
            # Missing metrics render as 'nan' instead of raising KeyError.
            line += f"{entry['metrics'].get(key, float('nan')):<15.4f}"
        print(line)
    print(rule)
    print()

save_summary_report(ref_img, competitors, metric_keys, title, filename_suffix, save_dir, include_original_in_table=False) staticmethod

Generate and save a unified figure with images and a ranked metrics table.

Parameters:

Name Type Description Default
ref_img

Reference image (uint8) or None.

required
competitors

List of dicts with 'name', 'metrics', 'image' keys.

required
metric_keys

List of metric names to display.

required
title

Figure title.

required
filename_suffix

Output filename without extension.

required
save_dir

Directory to save the figure.

required
include_original_in_table

Whether to include 'Original' in the table.

False
Source code in mhrqi/utils/visualization.py
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
@staticmethod
def save_summary_report(
    ref_img,
    competitors,
    metric_keys,
    title,
    filename_suffix,
    save_dir,
    include_original_in_table=False,
):
    """
    Generate and save a unified figure with images and a ranked metrics table.

    Args:
        ref_img: Reference image (uint8) or None.
        competitors: List of dicts with 'name', 'metrics', 'image' keys.
        metric_keys: List of metric names to display.
        title: Figure title.
        filename_suffix: Output filename without extension.
        save_dir: Directory to save the figure.
        include_original_in_table: Whether to include 'Original' in the table.
    """
    if include_original_in_table:
        table_methods = competitors
    else:
        table_methods = [c for c in competitors if c["name"] != "Original"]

    # Nothing to rank or plot.
    if not table_methods:
        return

    data_map = {m["name"]: m["metrics"] for m in table_methods}
    names = [m["name"] for m in table_methods]

    # Metrics where larger values rank better; all others rank ascending.
    higher_better = {"OMQDI", "EPF", "ENL", "EPI", "CNR", "NSF"}
    ranks = {k: {} for k in metric_keys}

    for k in metric_keys:
        is_higher = k in higher_better
        valid_items = [(name, data_map[name].get(k, float("nan"))) for name in names]
        valid_items = [x for x in valid_items if not np.isnan(x[1])]
        valid_items.sort(key=lambda x: x[1], reverse=is_higher)

        # Competition ranking: tied values share the earlier rank.
        current_rank = 1
        prev_val = None
        for i, (name, val) in enumerate(valid_items):
            if prev_val is not None and val == prev_val:
                ranks[k][name] = current_rank
            else:
                current_rank = i + 1
                ranks[k][name] = current_rank
            prev_val = val

    has_ref_plot = ref_img is not None
    n_imgs = len(table_methods) + (1 if has_ref_plot else 0)

    # Widen the figure with the thumbnail count (~2.5 in per image).
    fig_width = max(10, n_imgs * 2.5)
    fig_height = 6

    fig = plt.figure(figsize=(fig_width, fig_height))

    # Row 0: image thumbnails; row 1: the metrics table.
    gs = fig.add_gridspec(2, n_imgs, height_ratios=[1, 1], hspace=0.1)

    col_idx = 0

    if has_ref_plot:
        ax_ref = fig.add_subplot(gs[0, col_idx])
        ax_ref.imshow(ref_img, cmap="gray", vmin=0, vmax=255)
        ax_ref.set_title("Reference", fontsize=10, fontweight="bold")
        ax_ref.set_xticks([])
        ax_ref.set_yticks([])
        col_idx += 1

    for m in table_methods:
        ax = fig.add_subplot(gs[0, col_idx])
        ax.imshow(m["image"], cmap="gray", vmin=0, vmax=255)
        ax.set_title(m["name"], fontsize=10)
        ax.set_xticks([])
        ax.set_yticks([])
        col_idx += 1

    # The table spans the full width of the bottom row.
    ax_table = fig.add_subplot(gs[1, :])
    ax_table.axis("off")

    cell_text = []
    for name in names:
        row = []
        for k in metric_keys:
            val = data_map[name].get(k, float("nan"))
            if np.isnan(val):
                row.append("N/A")
            else:
                # Append the rank to the value, e.g. "0.1234 (#1)".
                r = ranks[k].get(name, "")
                rank_str = f" (#{r})" if r else ""
                row.append(f"{val:.4f}{rank_str}")
        cell_text.append(row)

    table = ax_table.table(
        cellText=cell_text,
        rowLabels=names,
        colLabels=metric_keys,
        loc="center",
        cellLoc="center",
    )
    table.auto_set_font_size(False)
    table.set_fontsize(10)
    table.scale(1.0, 1.5)

    # Header row: white-on-blue, bold.
    for j in range(len(metric_keys)):
        table[(0, j)].set_facecolor("#4472C4")
        table[(0, j)].set_text_props(color="white", weight="bold")

    # Highlight the best (#1) cell of each metric in green.
    for i, name in enumerate(names):
        for j, k in enumerate(metric_keys):
            r = ranks[k].get(name, None)
            if r == 1:
                table[(i + 1, j)].set_facecolor("#C6EFCE")
                table[(i + 1, j)].set_text_props(weight="bold")

    fig.suptitle(title, fontsize=14, fontweight="bold", y=0.95)

    if save_dir:
        os.makedirs(save_dir, exist_ok=True)
        out_path = os.path.join(save_dir, f"{filename_suffix}.png")
        plt.savefig(out_path, dpi=150, bbox_inches="tight")
    # Close to release figure memory even when nothing was saved.
    plt.close(fig)

TrendPlotter

Line graphs for trend analysis.

Source code in mhrqi/utils/visualization.py
913
914
915
916
class TrendPlotter:
    """Line graphs for trend analysis."""

    # Thin namespace: re-exports the module-level helper as a static method.
    plot_shots_vs_mse = staticmethod(plot_shots_vs_mse)

auto_detect_rois(img)

Auto-detect signal and background ROIs for CNR calculation.

Signal ROI: centroid of the top-10% intensity region. Background ROI: lowest-variance block with center bias.

Parameters:

Name Type Description Default
img

Input image.

required

Returns:

Type Description

Tuple (signal_roi, bg_roi), each as (y, x, h, w).

Source code in mhrqi/utils/visualization.py
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
def auto_detect_rois(img):
    """
    Auto-detect signal and background ROIs for CNR calculation.

    Signal ROI: centroid of the top-10% intensity region.
    Background ROI: lowest-variance block with center bias.

    Args:
        img: Input image.

    Returns:
        Tuple (signal_roi, bg_roi), each as (y, x, h, w).
    """
    arr = _to_float_array(img)
    h, w = arr.shape

    # Square ROI side: at most 16 px and at most a quarter of each
    # dimension, but never smaller than 4 px.
    block_size = max(4, min(16, h // 4, w // 4))

    # Signal ROI: center a block on the centroid of the brightest 10% of pixels.
    bright_mask = arr > np.percentile(arr, 90)
    ys, xs = np.where(bright_mask)
    if len(ys) > 0:
        cy, cx = int(np.mean(ys)), int(np.mean(xs))
        top = max(0, min(cy - block_size // 2, h - block_size))
        left = max(0, min(cx - block_size // 2, w - block_size))
        signal_roi = (top, left, block_size, block_size)
    else:
        signal_roi = (0, 0, block_size, block_size)

    # Background ROI: scan half-overlapping blocks and keep the one with the
    # lowest variance, slightly preferring blocks near the image center.
    best_cost = float("inf")
    bg_roi = (0, 0, block_size, block_size)
    half_diag = np.sqrt((h / 2) ** 2 + (w / 2) ** 2)

    for y in range(0, h - block_size, block_size // 2):
        for x in range(0, w - block_size, block_size // 2):
            block = arr[y : y + block_size, x : x + block_size]
            if np.mean(block) > 0.95:
                # Skip near-saturated blocks (assumes intensities in [0, 1]).
                continue
            var = np.var(block)
            center_dist = np.sqrt(
                (y + block_size / 2 - h / 2) ** 2 + (x + block_size / 2 - w / 2) ** 2
            )
            cost = var + 0.05 * (center_dist / half_diag)
            if cost < best_cost and var > 0:
                best_cost = cost
                bg_roi = (y, x, block_size, block_size)

    return signal_roi, bg_roi

bins_to_image(bins, d, N, kind='p', eps=0.0, vmin=0.0, vmax=1.0)

Convert bins directly to a uint8 image.

Parameters:

Name Type Description Default
bins

Measurement bins dict.

required
d

Qudit dimension.

required
N

Image size.

required
kind

Value type ("p" for p-hat probability, "hit", "miss").

'p'
eps

Smoothing epsilon for p-hat.

0.0
vmin

Minimum value for scaling to [0, 255].

0.0
vmax

Maximum value for scaling to [0, 255].

1.0

Returns:

Type Description

uint8 image array of shape (N, N).

Source code in mhrqi/utils/visualization.py
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
def bins_to_image(bins, d, N, kind="p", eps=0.0, vmin=0.0, vmax=1.0):
    """
    Convert bins directly to a uint8 image.

    Args:
        bins: Measurement bins dict.
        d: Qudit dimension.
        N: Image size.
        kind: Value type ("p" for p-hat probability, "hit", "miss").
        eps: Smoothing epsilon for p-hat.
        vmin: Minimum value for scaling to [0, 255].
        vmax: Maximum value for scaling to [0, 255].

    Returns:
        uint8 image array of shape (N, N).
    """
    out = np.zeros((N, N), dtype=np.uint8)
    span = vmax - vmin

    for coord_vector, bin_stats in bins.items():
        row, col = utils.compose_rc(coord_vector, d)
        value = _value_from_bin(bin_stats, kind, eps)
        if np.isnan(value):
            value = 0.0
        # Clip into [vmin, vmax], normalize to [0, 1], then scale to 0..255.
        frac = (np.clip(value, vmin, vmax) - vmin) / span
        out[row, col] = int(round(frac * 255.0))

    return out

compute_cnr(img, signal_roi=None, bg_roi=None)

Compute Contrast-to-Noise Ratio (CNR).

CNR = |mean_signal - mean_bg| / std_bg. ROIs are auto-detected if not provided.

Parameters:

Name Type Description Default
img

Input image.

required
signal_roi

Optional (y, x, h, w) signal region.

None
bg_roi

Optional (y, x, h, w) background region.

None

Returns:

Type Description

Tuple (cnr_value, signal_roi, bg_roi). Higher CNR is better.

Source code in mhrqi/utils/visualization.py
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
def compute_cnr(img, signal_roi=None, bg_roi=None):
    """
    Compute Contrast-to-Noise Ratio (CNR).

    CNR = |mean_signal - mean_bg| / std_bg.
    ROIs are auto-detected if not provided.

    Args:
        img: Input image.
        signal_roi: Optional (y, x, h, w) signal region.
        bg_roi: Optional (y, x, h, w) background region.

    Returns:
        Tuple (cnr_value, signal_roi, bg_roi). Higher CNR is better.
    """
    arr = _to_float_array(img)

    # Fall back to automatic ROI selection when either region is missing.
    if signal_roi is None or bg_roi is None:
        signal_roi, bg_roi = auto_detect_rois(arr)

    sy, sx, sh, sw = signal_roi
    by, bx, bh, bw = bg_roi

    mean_signal = np.mean(arr[sy : sy + sh, sx : sx + sw])
    bg_block = arr[by : by + bh, bx : bx + bw]
    mean_bg = np.mean(bg_block)
    std_bg = np.std(bg_block)

    # A near-constant background would blow up the ratio; return the cap.
    if std_bg < 1e-10:
        return (10000.0, signal_roi, bg_roi)

    cnr = abs(mean_signal - mean_bg) / std_bg
    return (float(min(cnr, 10000.0)), signal_roi, bg_roi)

compute_enl(img, roi=None)

Compute Equivalent Number of Looks (ENL).

ENL = mean² / variance. Evaluated on a homogeneous ROI if provided.

Parameters:

Name Type Description Default
img

Input image.

required
roi

Optional (y, x, h, w) region of interest.

None

Returns:

Type Description

ENL value. Higher is better. Capped at 10000.

Reference

Ulaby et al., 1986.

Source code in mhrqi/utils/visualization.py
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
def compute_enl(img, roi=None):
    """
    Compute Equivalent Number of Looks (ENL).

    ENL = mean² / variance. Evaluated on a homogeneous ROI if provided.

    Args:
        img: Input image.
        roi: Optional (y, x, h, w) region of interest.

    Returns:
        ENL value. Higher is better. Capped at 10000.

    Reference:
        Ulaby et al., 1986.
    """
    arr = _to_float_array(img)

    # Restrict to the ROI when one is given, else use the whole image.
    region = arr
    if roi is not None:
        y, x, h, w = roi
        region = arr[y : y + h, x : x + w]

    mean_val = np.mean(region)
    var_val = np.var(region)

    # A (near-)zero-variance region is perfectly smooth; return the cap.
    if var_val < 1e-10:
        return 10000.0

    return float(min((mean_val**2) / var_val, 10000.0))

compute_epi(img_original, img_denoised)

Compute Edge Preservation Index (EPI).

EPI is the Pearson correlation of Sobel gradient magnitudes between the original and denoised images. Higher values indicate better edge preservation.

Parameters:

Name Type Description Default
img_original

Original image.

required
img_denoised

Denoised image.

required

Returns:

Type Description

EPI in [-1, 1]. Higher is better.

Reference

Sattar et al., 1997.

Source code in mhrqi/utils/visualization.py
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
def compute_epi(img_original, img_denoised):
    """
    Compute Edge Preservation Index (EPI).

    EPI is the Pearson correlation between the Sobel gradient magnitudes
    of the original and denoised images; values closer to 1 indicate
    better edge preservation.

    Args:
        img_original: Original image.
        img_denoised: Denoised image.

    Returns:
        EPI in [-1, 1]. Higher is better.

    Reference:
        Sattar et al., 1997.
    """
    import cv2

    def as_uint8(arr):
        # Images already in [0, 1] are rescaled to [0, 255] first.
        if arr.max() <= 1.0:
            arr = arr * 255
        return arr.astype(np.uint8)

    def gradient_magnitude(u8):
        gx = cv2.Sobel(u8, cv2.CV_64F, 1, 0, ksize=3)
        gy = cv2.Sobel(u8, cv2.CV_64F, 0, 1, ksize=3)
        return np.sqrt(gx**2 + gy**2)

    grad_ref = gradient_magnitude(as_uint8(_to_float_array(img_original)))
    grad_out = gradient_magnitude(as_uint8(_to_float_array(img_denoised)))

    corr = np.corrcoef(grad_ref.ravel(), grad_out.ravel())[0, 1]
    return 0.0 if np.isnan(corr) else float(corr)

compute_fsim(img_ref, img_test)

Compute FSIM (Feature Similarity Index) using piq.

Parameters:

Name Type Description Default
img_ref

Reference image.

required
img_test

Test image.

required

Returns:

Type Description

FSIM score in [0, 1]. Higher is better.

Source code in mhrqi/utils/visualization.py
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
def compute_fsim(img_ref, img_test):
    """
    Compute FSIM (Feature Similarity Index) using piq.

    Args:
        img_ref: Reference image.
        img_test: Test image.

    Returns:
        FSIM score in [0, 1]. Higher is better.
    """
    torch, piq = _ensure_torch_piq()

    def normalized(arr):
        # Values above 1 are treated as an 8-bit range and rescaled.
        if arr.max() > 1.0:
            arr /= 255.0
        return np.clip(arr, 0.0, 1.0)

    def as_batch(arr):
        # Reshape to the NCHW batch layout expected by piq.
        tensor = torch.from_numpy(arr).float()
        if tensor.ndim == 2:
            tensor = tensor.unsqueeze(0).unsqueeze(0)
        elif tensor.ndim == 3:
            tensor = tensor.permute(2, 0, 1).unsqueeze(0)
        return tensor

    ref_batch = as_batch(normalized(_to_float_array(img_ref)))
    test_batch = as_batch(normalized(_to_float_array(img_test)))

    try:
        result = piq.fsim(ref_batch, test_batch, data_range=1.0, reduction="none", chromatic=False)
        return float(result.item())
    except Exception:
        return float("nan")

compute_mse(img_gt, img_test)

Compute mean squared error between two images.

Parameters:

Name Type Description Default
img_gt

Ground truth image.

required
img_test

Test image.

required

Returns:

Type Description

MSE value.

Source code in mhrqi/utils/visualization.py
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
def compute_mse(img_gt, img_test):
    """
    Compute the mean squared error between two images.

    Args:
        img_gt: Ground truth image.
        img_test: Test image.

    Returns:
        MSE value.
    """
    reference = _to_float_array(img_gt)
    candidate = _to_float_array(img_test)
    # Both images must have identical dimensions for a pixelwise metric.
    _check_same_shape(reference, candidate)
    return mean_squared_error(reference, candidate)

compute_niqe(img_input)

Compute NIQE (Natural Image Quality Evaluator) using scikit-video.

Parameters:

Name Type Description Default
img_input

Input image.

required

Returns:

Type Description

NIQE score. Lower is better.

Source code in mhrqi/utils/visualization.py
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
def compute_niqe(img_input):
    """
    Compute NIQE (Natural Image Quality Evaluator) using scikit-video.

    Args:
        img_input: Input image.

    Returns:
        NIQE score. Lower is better.
    """
    arr = _to_float_array(img_input)

    # scikit-video expects uint8; rescale [0, 1] floats to 8-bit first.
    scaled = arr * 255.0 if arr.max() <= 1.0 else arr
    frame = scaled.astype(np.uint8)

    # NIQE wants a leading frame axis; collapse RGB to grayscale when needed.
    if frame.ndim == 3 and frame.shape[2] == 3:
        frame = cv2.cvtColor(frame, cv2.COLOR_RGB2GRAY)
        frame = frame[np.newaxis, ...]
    elif frame.ndim in (2, 3):
        frame = frame[np.newaxis, ...]

    try:
        return float(skvideo.measure.niqe(frame)[0])
    except Exception:
        return float("nan")

compute_omqdi(img_noisy, img_denoised)

Compute OMQDI (Objective Measure of Quality of Denoised Images).

DOI: 10.1016/j.bspc.2021.102962

Parameters:

Name Type Description Default
img_noisy

Noisy input image (single channel).

required
img_denoised

Denoised output image (single channel).

required

Returns:

Type Description

Tuple (OMQDI, EPF, NSF):
  • OMQDI: Combined metric Q1 + Q2, ideal value 2, range [1, 2].
  • EPF: Edge-Preservation Factor (Q1), ideal value 1, range [0, 1].
  • NSF: Noise-Suppression Factor (Q2), ideal value 1, range [0, 1].
Source code in mhrqi/utils/visualization.py
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
def compute_omqdi(img_noisy, img_denoised):
    """
    Compute OMQDI (Objective Measure of Quality of Denoised Images).

    DOI: 10.1016/j.bspc.2021.102962

    Args:
        img_noisy: Noisy input image (single channel).
        img_denoised: Denoised output image (single channel).

    Returns:
        Tuple (OMQDI, EPF, NSF):
            - OMQDI: Combined metric Q1 + Q2, ideal value 2, range [1, 2].
            - EPF: Edge-Preservation Factor (Q1), ideal value 1, range [0, 1].
            - NSF: Noise-Suppression Factor (Q2), ideal value 1, range [0, 1].
    """
    from mhrqi.benchmarks.compare_to import OMQDI

    before = _to_float_array(img_noisy)
    after = _to_float_array(img_denoised)

    # Normalize 8-bit inputs to [0, 1] as the metric expects.
    if before.max() > 1.0:
        before = before / 255.0
    if after.max() > 1.0:
        after = after / 255.0

    try:
        combined, q1, q2 = OMQDI(before, after)
        return (float(combined), float(q1), float(q2))
    except Exception:
        nan = float("nan")
        return (nan, nan, nan)

compute_psnr(img_gt, img_test, data_range=255.0)

Compute Peak Signal-to-Noise Ratio (PSNR) in dB.

Parameters:

Name Type Description Default
img_gt

Ground truth image.

required
img_test

Test image.

required
data_range

Dynamic range of the images.

255.0

Returns:

Type Description

PSNR in dB. Higher is better.

Source code in mhrqi/utils/visualization.py
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
def compute_psnr(img_gt, img_test, data_range=255.0):
    """
    Compute Peak Signal-to-Noise Ratio (PSNR) in dB.

    PSNR = 10 * log10(data_range^2 / MSE).

    Args:
        img_gt: Ground truth image.
        img_test: Test image.
        data_range: Dynamic range of the images.

    Returns:
        PSNR in dB as a builtin float. Higher is better. Returns inf when
        the images are identical (MSE == 0).
    """
    mse_val = compute_mse(img_gt, img_test)
    if mse_val == 0:
        return float("inf")
    # Cast to a builtin float for consistency with the other metrics in
    # this module (compute_enl, compute_epi, ... all return float, not
    # np.float64).
    return float(10.0 * np.log10((data_range**2) / mse_val))

compute_smpi(img_original, img_filtered)

Compute Speckle Mean Preservation Index (SMPI).

Lower values indicate better speckle suppression with mean preservation.

Parameters:

Name Type Description Default
img_original

Original noisy image.

required
img_filtered

Filtered image.

required

Returns:

Type Description

SMPI value. Lower is better.

Source code in mhrqi/utils/visualization.py
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
def compute_smpi(img_original, img_filtered):
    """
    Compute Speckle Mean Preservation Index (SMPI).

    SMPI = (1 + |mean_orig - mean_filt|) * sqrt(var_filt / var_orig).
    Lower values indicate better speckle suppression with mean
    preservation.

    Args:
        img_original: Original noisy image.
        img_filtered: Filtered image.

    Returns:
        SMPI value. Lower is better. Returns inf when the original image
        is (near) constant, since the variance ratio is then undefined.
    """
    original = _to_float_array(img_original)
    filtered = _to_float_array(img_filtered)

    mean_o = np.mean(original)
    mean_f = np.mean(filtered)
    var_o = np.var(original)
    var_f = np.var(filtered)

    # Mean-preservation penalty: exactly 1 when the means match.
    q = 1 + np.abs(mean_o - mean_f)

    # Use an epsilon threshold — consistent with compute_enl/compute_ssi —
    # instead of an exact float == 0 comparison, so near-zero variance
    # does not blow up the ratio.
    eps = 1e-10
    if var_o < eps:
        return float("inf")

    return float(q * np.sqrt(var_f / var_o))

compute_ssi(img_noisy, img_filtered, roi)

Compute Speckle Suppression Index (SSI).

SSI = (std_filtered / mean_filtered) / (std_noisy / mean_noisy), evaluated on a homogeneous ROI. Lower is better.

Parameters:

Name Type Description Default
img_noisy

Noisy input image.

required
img_filtered

Filtered output image.

required
roi

Region of interest as (y, x, h, w) or array index.

required

Returns:

Type Description

SSI value. Lower is better.

Source code in mhrqi/utils/visualization.py
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
def compute_ssi(img_noisy, img_filtered, roi):
    """
    Compute Speckle Suppression Index (SSI).

    SSI = CoV(filtered ROI) / CoV(noisy ROI), where CoV is the
    coefficient of variation (std / mean), evaluated on a homogeneous
    region. Lower is better.

    Args:
        img_noisy: Noisy input image.
        img_filtered: Filtered output image.
        roi: Region of interest as (y, x, h, w) or array index.

    Returns:
        SSI value. Lower is better.
    """
    noisy = _to_float_array(img_noisy)
    filtered = _to_float_array(img_filtered)

    # Accept either a (y, x, h, w) tuple or any numpy-style index.
    if isinstance(roi, tuple) and len(roi) == 4:
        top, left, height, width = roi
        window = (slice(top, top + height), slice(left, left + width))
        patch_n = noisy[window]
        patch_f = filtered[window]
    else:
        patch_n = noisy[roi]
        patch_f = filtered[roi]

    mean_n, std_n = np.mean(patch_n), np.std(patch_n)
    mean_f, std_f = np.mean(patch_f), np.std(patch_f)

    # Degenerate (near-zero mean) regions make the CoV meaningless.
    eps = 1e-10
    if mean_n < eps or mean_f < eps:
        return float("inf")

    cov_noisy = std_n / mean_n
    if cov_noisy < eps:
        return float("inf")

    return float((std_f / mean_f) / cov_noisy)

compute_ssim(img_gt, img_test, data_range=255.0)

Compute Structural Similarity Index (SSIM).

Parameters:

Name Type Description Default
img_gt

Ground truth image.

required
img_test

Test image.

required
data_range

Dynamic range of the images.

255.0

Returns:

Type Description

SSIM score. Higher is better.

Source code in mhrqi/utils/visualization.py
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
def compute_ssim(img_gt, img_test, data_range=255.0):
    """
    Compute the Structural Similarity Index (SSIM) between two images.

    Args:
        img_gt: Ground truth image.
        img_test: Test image.
        data_range: Dynamic range of the images.

    Returns:
        SSIM score. Higher is better.
    """
    reference = _to_float_array(img_gt)
    candidate = _to_float_array(img_test)
    # SSIM is only defined for images of identical shape.
    _check_same_shape(reference, candidate)
    return structural_similarity(reference, candidate, data_range=data_range)

get_run_dir(run_dir=None)

Get or create the current run output directory.

Parameters:

Name Type Description Default
run_dir

If provided, use this path directly.

None

Returns:

Type Description

Path to the run output directory.

Source code in mhrqi/utils/visualization.py
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
def get_run_dir(run_dir=None):
    """
    Get or create the current run output directory.

    The resolved path is cached in a module-level variable so that later
    calls without an argument reuse the same directory.

    Args:
        run_dir: If provided, use this path directly.

    Returns:
        Path to the run output directory.
    """
    global _current_run_dir

    # An explicit path always wins and becomes the new cached directory.
    if run_dir is not None:
        os.makedirs(run_dir, exist_ok=True)
        _current_run_dir = run_dir
        return run_dir

    # Otherwise reuse the cached directory, creating a timestamped one
    # under "runs/" on first use.
    if _current_run_dir is None:
        stamp = datetime.datetime.now().strftime("%Y%m%d_%H%M")
        _current_run_dir = os.path.join("runs", stamp)
        os.makedirs(_current_run_dir, exist_ok=True)
    return _current_run_dir

grid_to_image_uint8(grid, vmin=None, vmax=None, flip_vertical=False)

Convert an N x N grid into a uint8 image.

NaN values are replaced with 0 before scaling.

Parameters:

Name Type Description Default
grid

2D numpy array.

required
vmin

Minimum value for scaling. Inferred from data if None.

None
vmax

Maximum value for scaling. Inferred from data if None.

None
flip_vertical

If True, flip the image vertically.

False

Returns:

Type Description

uint8 image array of shape (N, N).

Source code in mhrqi/utils/visualization.py
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
def grid_to_image_uint8(grid, vmin=None, vmax=None, flip_vertical=False):
    """
    Convert an N x N grid into a uint8 image.

    Non-finite values (NaN and +/-inf) are replaced with 0 before
    scaling, and automatic vmin/vmax are inferred from the finite
    entries only, so a stray inf cannot collapse the output range.

    Args:
        grid: 2D numpy array.
        vmin: Minimum value for scaling. Inferred from finite data if None.
        vmax: Maximum value for scaling. Inferred from finite data if None.
        flip_vertical: If True, flip the image vertically.

    Returns:
        uint8 image array of shape (N, N).
    """
    work = np.array(grid, dtype=float)
    finite = np.isfinite(work)
    # Zero out NaN AND inf: the original np.isnan-only replacement let an
    # inf survive into nanmin/nanmax and corrupt the scaling.
    work[~finite] = 0.0

    if not finite.any():
        # No data to infer a range from; fall back to [0, 1] but still
        # honor explicitly supplied bounds.
        vmin = 0.0 if vmin is None else vmin
        vmax = 1.0 if vmax is None else vmax
    else:
        vmin = float(work[finite].min()) if vmin is None else vmin
        vmax = float(work[finite].max()) if vmax is None else vmax
    if vmax == vmin:
        # Guard against division by zero for constant grids.
        vmax = vmin + 1.0

    img = (np.clip(work, vmin, vmax) - vmin) / (vmax - vmin)
    img = (img * 255.0).round().astype(np.uint8)

    if flip_vertical:
        img = np.flipud(img)

    return img

plot_bias_map(bias_stats, original_img, N, d, run_dir=None)

Visualize the denoiser confidence map derived from the outcome ancilla.

Parameters:

Name Type Description Default
bias_stats

Dict mapping position vectors to hit/miss stats.

required
original_img

Original grayscale image in [0, 1].

required
N

Image size.

required
d

Qudit dimension.

required
run_dir

Output directory.

None

Returns:

Type Description

Confidence ratio map as a 2D numpy array, or None if no stats.

Source code in mhrqi/utils/visualization.py
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
def plot_bias_map(bias_stats, original_img, N, d, run_dir=None):
    """
    Visualize the denoiser confidence map derived from the outcome ancilla.

    Args:
        bias_stats: Dict mapping position vectors to hit/miss stats.
        original_img: Original grayscale image in [0, 1].
        N: Image size.
        d: Qudit dimension.
        run_dir: Output directory.

    Returns:
        Confidence ratio map as a 2D numpy array, or None if no stats.
    """
    if bias_stats is None:
        print("No bias stats to plot.")
        return None

    # Build the per-pixel hit-ratio map; 0.5 marks pixels with no data.
    confidence = np.zeros((N, N))
    for position, stats in bias_stats.items():
        row, col = utils.compose_rc(position, d)
        hits = stats.get("hit", 0)
        misses = stats.get("miss", 0)
        count = hits + misses
        confidence[row, col] = hits / count if count > 0 else 0.5

    out_dir = get_run_dir(run_dir)

    fig, (ax_left, ax_right) = plt.subplots(1, 2, figsize=(10, 5))

    if original_img is not None:
        ax_left.imshow(original_img, cmap="gray", vmin=0, vmax=1)
    ax_left.set_title("Original Image")
    ax_left.axis("off")

    heat = ax_right.imshow(confidence, cmap="viridis", vmin=0, vmax=1)
    ax_right.set_title("Bias Confidence (Hit Ratio)")
    ax_right.axis("off")
    fig.colorbar(heat, ax=ax_right, fraction=0.046, pad=0.04)

    plt.tight_layout()
    plt.savefig(os.path.join(out_dir, "bias_map.png"), dpi=150, bbox_inches="tight")

    if not HEADLESS:
        plt.show()
    plt.close()

    return confidence

plot_mse_map(img_gt, img_test, title='Per-pixel squared error', run_dir=None)

Save a per-pixel squared error heatmap.

Parameters:

Name Type Description Default
img_gt

Ground truth image.

required
img_test

Test image.

required
title

Plot title.

'Per-pixel squared error'
run_dir

Output directory.

None
Source code in mhrqi/utils/visualization.py
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
def plot_mse_map(img_gt, img_test, title="Per-pixel squared error", run_dir=None):
    """
    Save a per-pixel squared error heatmap.

    Args:
        img_gt: Ground truth image.
        img_test: Test image.
        title: Plot title.
        run_dir: Output directory.
    """
    reference = _to_float_array(img_gt)
    candidate = _to_float_array(img_test)
    _check_same_shape(reference, candidate)

    # For color images, average the squared error across channels so the
    # heatmap stays 2D.
    diff_sq = (reference - candidate) ** 2
    if reference.ndim == 3:
        diff_sq = np.mean(diff_sq, axis=2)

    plt.figure()
    heat = plt.imshow(diff_sq, cmap="RdYlGn_r")
    plt.title(title)
    plt.axis("off")
    colorbar = plt.colorbar(heat)
    colorbar.set_label("squared error")

    out_dir = get_run_dir(run_dir)
    plt.savefig(os.path.join(out_dir, "mse_map.png"), dpi=150, bbox_inches="tight")

plot_shots_vs_mse(shots, mse_values, title='Shots vs MSE', run_dir=None)

Plot and save a shots vs MSE trend graph.

Parameters:

Name Type Description Default
shots

List of shot counts.

required
mse_values

List of MSE values.

required
title

Plot title.

'Shots vs MSE'
run_dir

Output directory.

None
Source code in mhrqi/utils/visualization.py
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
def plot_shots_vs_mse(shots, mse_values, title="Shots vs MSE", run_dir=None):
    """
    Plot and save a shots vs MSE trend graph.

    Args:
        shots: List of shot counts.
        mse_values: List of MSE values.
        title: Plot title.
        run_dir: Output directory.

    Raises:
        ValueError: If shots and mse_values differ in length.
    """
    # The two series must pair up one-to-one.
    if len(shots) != len(mse_values):
        raise ValueError("shots and mse_values length mismatch")

    plt.figure()
    plt.plot(shots, mse_values)
    plt.xlabel("Shots")
    plt.ylabel("MSE")
    plt.title(title)
    plt.grid(True)

    out_dir = get_run_dir(run_dir)
    plt.savefig(os.path.join(out_dir, "shots_vs_mse.png"), dpi=150, bbox_inches="tight")

reset_run_dir()

Reset the cached run directory.

Source code in mhrqi/utils/visualization.py
89
90
91
92
def reset_run_dir():
    """Reset the cached run directory."""
    # Clear the module-level cache so the next get_run_dir() call
    # resolves a fresh directory instead of reusing the previous one.
    global _current_run_dir
    _current_run_dir = None

save_settings_plot(settings_dict, run_dir=None, filename='settings.png')

Create a visual table of run settings and save it as a PNG.

Parameters:

Name Type Description Default
settings_dict

Dict of setting names to values.

required
run_dir

Output directory. Uses get_run_dir() if None.

None
filename

Output filename.

'settings.png'
Source code in mhrqi/utils/visualization.py
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
def save_settings_plot(settings_dict, run_dir=None, filename="settings.png"):
    """
    Create a visual table of run settings and save it as a PNG.

    Args:
        settings_dict: Dict of setting names to values.
        run_dir: Output directory. Uses get_run_dir() if None.
        filename: Output filename.
    """
    run_dir = get_run_dir(run_dir)

    # Grow the figure with the number of rows so the table stays legible.
    fig, ax = plt.subplots(figsize=(6, max(2, len(settings_dict) * 0.4)))
    ax.axis("off")

    rows = [[name, str(value)] for name, value in settings_dict.items()]

    table = ax.table(
        cellText=rows,
        colLabels=["Setting", "Value"],
        loc="center",
        cellLoc="left",
        colWidths=[0.4, 0.6],
    )
    table.auto_set_font_size(False)
    table.set_fontsize(10)
    table.scale(1.2, 1.5)

    # Style the header row: blue background, bold white text.
    for col in (0, 1):
        header_cell = table[(0, col)]
        header_cell.set_facecolor("#4472C4")
        header_cell.set_text_props(color="white", weight="bold")

    plt.title("Run Settings", fontsize=12, weight="bold", pad=20)
    plt.tight_layout()
    plt.savefig(os.path.join(run_dir, filename), dpi=150, bbox_inches="tight")
    plt.close(fig)

show_image_comparison(orig_img, recon_img, titles=('Original', 'Reconstructed'), run_dir=None, img_name=None)

Plot two images side by side and save the comparison.

Parameters:

Name Type Description Default
orig_img

Original image (2D uint8 preferred).

required
recon_img

Reconstructed image (2D uint8 preferred).

required
titles

Tuple of display titles for the two images.

('Original', 'Reconstructed')
run_dir

Output directory. Uses get_run_dir() if None.

None
img_name

Base name for saved files.

None
Source code in mhrqi/utils/visualization.py
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
def show_image_comparison(
    orig_img, recon_img, titles=("Original", "Reconstructed"), run_dir=None, img_name=None
):
    """
    Plot two images side by side and save the comparison.

    Args:
        orig_img: Original image (2D uint8 preferred).
        recon_img: Reconstructed image (2D uint8 preferred).
        titles: Tuple of display titles for the two images.
        run_dir: Output directory. Uses get_run_dir() if None.
        img_name: Base name for saved files.
    """
    fig, axes = plt.subplots(1, 2, figsize=(8, 4))
    axes[0].imshow(orig_img, cmap="gray", vmin=0, vmax=255)
    axes[0].set_title(titles[0])
    axes[0].set_xticks([])
    axes[0].set_yticks([])

    axes[1].imshow(recon_img, cmap="gray", vmin=0, vmax=255)
    axes[1].set_title(titles[1])
    axes[1].set_xticks([])
    axes[1].set_yticks([])

    plt.tight_layout()

    dir_path = get_run_dir(run_dir)
    img_base = img_name or "reconstructed"
    plt.savefig(os.path.join(dir_path, f"{img_base}_comparison.png"), dpi=150, bbox_inches="tight")
    recon_img_uint8 = recon_img.astype(np.uint8) if recon_img.dtype != np.uint8 else recon_img
    cv2.imwrite(os.path.join(dir_path, f"{img_base}.png"), recon_img_uint8)