References

args = parser.parse_args() module-attribute

napari_ui = 'napari' in params['general'].get('ui', '') module-attribute

params = yaml.safe_load(file) module-attribute

parser = argparse.ArgumentParser(description='multiview-stitcher') module-attribute

pipeline = Pipeline(params, viewer) module-attribute

viewer = napari.Viewer() module-attribute
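
Taken together, these module attributes suggest a small launcher script. A minimal sketch of how they might fit together, assuming a Pipeline import path and a --params flag that this reference does not confirm:

import argparse

import napari
import yaml

from src.Pipeline import Pipeline  # assumed import path

parser = argparse.ArgumentParser(description='multiview-stitcher')
parser.add_argument('--params', default='params.yml')  # hypothetical flag
args = parser.parse_args()

with open(args.params, encoding='utf-8') as file:
    params = yaml.safe_load(file)

napari_ui = 'napari' in params['general'].get('ui', '')
viewer = napari.Viewer()  # shown unconditionally in this reference
pipeline = Pipeline(params, viewer)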

MVSRegistration

Source code in src\MVSRegistration.py
class MVSRegistration:
    def __init__(self, params_general):
        super().__init__()
        self.params_general = params_general

        params_logging = self.params_general.get('logging', {})
        self.verbose = params_logging.get('verbose', False)
        self.logging_dask = params_logging.get('dask', False)
        self.logging_time = params_logging.get('time', False)
        self.ui = self.params_general.get('ui', '')
        self.mpl_ui = ('mpl' in self.ui or 'plot' in self.ui)
        self.napari_ui = ('napari' in self.ui)
        self.source_transform_key = 'source_metadata'
        self.reg_transform_key = 'registered'
        self.transition_transform_key = 'transition'

        logging.info(f'Multiview-stitcher version: {multiview_stitcher.__version__}')

    def run_operation(self, fileset_label, filenames, params, global_rotation=None, global_center=None):
        self.fileset_label = fileset_label
        self.filenames = filenames
        self.file_labels = get_unique_file_labels(filenames)
        self.params = params
        self.global_rotation = global_rotation
        self.global_center = global_center

        input_dir = os.path.dirname(filenames[0])
        parts = split_numeric_dict(filenames[0])
        output_pattern = params['output'].format_map(parts)
        self.output = os.path.join(input_dir, output_pattern)    # preserve trailing slash: do not use os.path.normpath()

        with ProgressBar(minimum=10, dt=1) if self.logging_dask else nullcontext():
            return self._run_operation()

    def _run_operation(self):
        params = self.params
        filenames = self.filenames
        file_labels = self.file_labels
        output = self.output

        operation = params['operation']
        overlap_threshold = params.get('overlap_threshold', 0.5)
        source_metadata = import_metadata(params.get('source_metadata', {}), input_path=params['input'])
        save_images = params.get('save_images', True)
        target_scale = params.get('scale')
        extra_metadata = import_metadata(params.get('extra_metadata', {}), input_path=params['input'])
        channels = extra_metadata.get('channels', [])
        normalise_orientation = 'norm' in source_metadata

        show_original = self.params_general.get('show_original', False)
        output_params = self.params_general.get('output', {})
        clear = output_params.get('clear', False)
        overwrite = output_params.get('overwrite', True)

        is_stack = ('stack' in operation)
        is_3d = ('3d' in operation)
        is_simple_stack = is_stack and not is_3d
        is_transition = ('transition' in operation)
        is_channel_overlay = (len(channels) > 1)

        mappings_header = ['id', 'x_pixels', 'y_pixels', 'z_pixels', 'x', 'y', 'z', 'rotation']

        if len(filenames) == 0:
            logging.warning('Skipping (no images)')
            return False

        registered_fused_filename = output + 'registered'
        mappings_filename = os.path.join(output, params.get('mappings', 'mappings.json'))

        output_dir = os.path.dirname(output)
        if not overwrite and exists_output_image(registered_fused_filename, output_params.get('format')):
            logging.warning(f'Skipping existing output {os.path.normpath(output_dir)}')
            return False
        if clear:
            shutil.rmtree(output_dir, ignore_errors=True)
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)

        with Timer('init sims', self.logging_time):
            sims, scales, positions, rotations = self.init_sims(target_scale=target_scale)

        with Timer('pre-process', self.logging_time):
            sims, register_sims, indices = self.preprocess(sims, params)

        data = []
        for label, sim, scale in zip(file_labels, sims, scales):
            position, rotation = get_data_mapping(sim, transform_key=self.source_transform_key)
            position_pixels = np.array(position) / scale
            row = [label] + list(position_pixels) + list(position) + [rotation]
            data.append(row)
        export_csv(output + 'prereg_mappings.csv', data, header=mappings_header)

        if show_original:
            # before registration:
            logging.info('Exporting original...')
            original_positions_filename = output + 'positions_original.pdf'

            with Timer('plot positions', self.logging_time):
                vis_utils.plot_positions(sims, transform_key=self.source_transform_key,
                                         use_positional_colors=False, view_labels=file_labels, view_labels_size=3,
                                         show_plot=self.mpl_ui, output_filename=original_positions_filename)

            if self.napari_ui:
                shapes = [get_sim_shape_2d(sim, transform_key=self.source_transform_key) for sim in sims]
                self.update_napari_signal.emit(f'{self.fileset_label} original', shapes, file_labels)

            if save_images:
                if output_params.get('thumbnail'):
                    with Timer('create thumbnail', self.logging_time):
                        self.save_thumbnail(output + 'thumb_original',
                                            nom_sims=sims,
                                            transform_key=self.source_transform_key)

                original_fused = self.fuse(sims, transform_key=self.source_transform_key)

                original_fused_filename = output + 'original'
                save_image(original_fused_filename, output_params.get('format'), original_fused, channels=channels,
                           transform_key=self.source_transform_key, params=output_params)

        if len(filenames) == 1 and save_images:
            logging.warning('Skipping registration (single image)')
            save_image(registered_fused_filename, output_params.get('format'), sims[0], channels=channels,
                       translation0=positions[0], params=output_params)
            return False

        _, has_overlaps = self.validate_overlap(sims, file_labels, is_simple_stack, is_simple_stack or is_channel_overlay)
        overall_overlap = np.mean(has_overlaps)
        if overall_overlap < overlap_threshold:
            raise ValueError(f'Not enough overlap: {overall_overlap * 100:.1f}%')

        if not overwrite and os.path.exists(mappings_filename):
            logging.info('Loading registration mappings...')
            # load registration mappings
            mappings = import_json(mappings_filename)
            # copy transforms to sims
            for sim, label in zip(sims, file_labels):
                mapping = param_utils.affine_to_xaffine(np.array(mappings[label]))
                if is_stack:
                    transform = param_utils.identity_transform(ndim=3)
                    transform.loc[{dim: mapping.coords[dim] for dim in mapping.dims}] = mapping
                else:
                    transform = mapping
                si_utils.set_sim_affine(sim, transform, transform_key=self.reg_transform_key)
        else:
            with Timer('register', self.logging_time):
                results = self.register(sims, register_sims, indices, params)

            reg_result = results['reg_result']
            sims = results['sims']

            logging.info('Exporting registered...')
            metrics = self.calc_metrics(results, file_labels)
            mappings = metrics['mappings']
            logging.info(metrics['summary'])
            export_json(mappings_filename, mappings)
            export_json(output + 'metrics.json', metrics)
            data = []
            for sim, (label, mapping), scale, position, rotation in zip(sims, mappings.items(), scales, positions, rotations):
                if not normalise_orientation:
                    # rotation already in msim affine transform
                    rotation = None
                position, rotation = get_data_mapping(sim, transform_key=self.reg_transform_key,
                                                      transform=np.array(mapping),
                                                      translation0=position,
                                                      rotation=rotation)
                position_pixels = np.array(position) / scale
                row = [label] + list(position_pixels) + list(position) + [rotation]
                data.append(row)
            export_csv(output + 'mappings.csv', data, header=mappings_header)

            for reg_label, reg_item in reg_result.items():
                if isinstance(reg_item, dict):
                    summary_plot = reg_item.get('summary_plot')
                    if summary_plot is not None:
                        figure, axes = summary_plot
                        summary_plot_filename = output + f'{reg_label}.pdf'
                        figure.savefig(summary_plot_filename)

        registered_positions_filename = output + 'positions_registered.pdf'
        with Timer('plot positions', self.logging_time):
            vis_utils.plot_positions(sims, transform_key=self.reg_transform_key,
                                     use_positional_colors=False, view_labels=file_labels, view_labels_size=3,
                                     show_plot=self.mpl_ui, output_filename=registered_positions_filename)

        if self.napari_ui:
            shapes = [get_sim_shape_2d(sim, transform_key=self.reg_transform_key) for sim in sims]
            self.update_napari_signal.emit(f'{self.fileset_label} registered', shapes, file_labels)

        if save_images:
            if output_params.get('thumbnail'):
                with Timer('create thumbnail', self.logging_time):
                    self.save_thumbnail(output + 'thumb', nom_sims=sims, transform_key=self.reg_transform_key)

            with Timer('fuse image', self.logging_time):
                fused_image = self.fuse(sims)

            logging.info('Saving fused image...')
            with Timer('save fused image', self.logging_time):
                save_image(registered_fused_filename, output_params.get('format'), fused_image, channels=channels,
                           transform_key=self.reg_transform_key, translation0=positions[0], params=output_params)

        if is_transition:
            self.save_video(output, sims, fused_image)

        return True

    def init_sims(self, target_scale=None):
        operation = self.params['operation']
        source_metadata = import_metadata(self.params.get('source_metadata', 'source'), input_path=self.params['input'])
        chunk_size = self.params_general.get('chunk_size', [1024, 1024])
        extra_metadata = import_metadata(self.params.get('extra_metadata', {}), input_path=self.params['input'])
        z_scale = extra_metadata.get('scale', {}).get('z')

        logging.info('Initialising sims...')
        sources = [create_dask_source(file, source_metadata) for file in self.filenames]
        source0 = sources[0]
        images = []
        sims = []
        scales = []
        translations = []
        rotations = []

        is_stack = ('stack' in operation)
        has_z_size = (source0.get_size().get('z', 0) > 0)
        is_3d = (has_z_size or '3d' in operation)
        pyramid_level = 0

        output_order = 'zyx' if is_stack or is_3d else 'yx'
        ndims = len(output_order)
        if source0.get_nchannels() > 1:
            output_order += 'c'

        last_z_position = None
        different_z_positions = False
        delta_zs = []
        for filename, source in zip(self.filenames, sources):
            scale = source.get_pixel_size()
            translation = source.get_position()
            rotation = source.get_rotation()

            if target_scale:
                pyramid_level = np.argmin(abs(np.array(source.scales) - target_scale))
                pyramid_scale = source.scales[pyramid_level]
                scale = {dim: size * pyramid_scale if dim in 'xy' else size for dim, size in scale.items()}
            if 'invert' in source_metadata:
                translation[0] = -translation[0]
                translation[1] = -translation[1]
            if len(translation) >= 3:
                z_position = translation['z']
            else:
                z_position = 0
            if last_z_position is not None and z_position != last_z_position:
                different_z_positions = True
                delta_zs.append(z_position - last_z_position)
            if self.global_rotation is not None:
                rotation = self.global_rotation

            dask_data = source.get_data(level=pyramid_level)
            image = redimension_data(dask_data, source.dimension_order, output_order)

            scales.append(scale)
            translations.append(translation)
            rotations.append(rotation)
            images.append(image)
            last_z_position = z_position

        if z_scale is None:
            if len(delta_zs) > 0:
                z_scale = np.min(delta_zs)
            else:
                z_scale = 1

        if 'norm' in source_metadata:
            size = np.array(source0.get_size()) * source0.get_pixel_size_micrometer()
            center = None
            if 'center' in source_metadata:
                if 'global' in source_metadata:
                    center = self.global_center
                else:
                    center = np.mean(translations, 0)
            elif 'origin' in source_metadata:
                center = np.zeros(ndims)
            translations, rotations = normalise_rotated_positions(translations, rotations, size, center)

        #translations = [np.array(translation) * 1.25 for translation in translations]

        increase_z_positions = is_stack and not different_z_positions
        z_position = 0
        scales2 = []
        translations2 = []
        for source, image, scale, translation, rotation, file_label in zip(sources, images, scales, translations, rotations, self.file_labels):
            # the number of transform dimensions needs to match
            if len(scale) > 0 and 'z' not in scale:
                scale['z'] = abs(z_scale)
            if (len(translation) > 0 and 'z' not in translation) or increase_z_positions:
                translation['z'] = z_position
            if increase_z_positions:
                z_position += z_scale
            channel_labels = [channel.get('label', '') for channel in source.get_channels()]
            if rotation is None or 'norm' in source_metadata:
                # if positions are normalised, don't use rotation
                transform = None
            else:
                transform = param_utils.invert_coordinate_order(
                    create_transform(translation, rotation, matrix_size=ndims + 1)
                )
            if file_label in extra_metadata:
                transform2 = extra_metadata[file_label]
                if transform is None:
                    transform = np.array(transform2)
                else:
                    transform = np.array(combine_transforms([transform, transform2]))
            sim = si_utils.get_sim_from_array(
                image,
                dims=list(output_order),
                scale=scale,
                translation=translation,
                affine=transform,
                transform_key=self.source_transform_key,
                c_coords=channel_labels
            )
            if len(sim.chunksizes.get('x')) == 1 and len(sim.chunksizes.get('y')) == 1:
                sim = sim.chunk(xyz_to_dict(chunk_size))
            sims.append(sim)
            scales2.append(dict_to_xyz(scale))
            translations2.append(dict_to_xyz(translation))
        return sims, scales2, translations2, rotations

    def validate_overlap(self, sims, labels, is_stack=False, expect_large_overlap=False):
        min_dists = []
        has_overlaps = []
        n = len(sims)
        positions = [get_sim_position_final(sim) for sim in sims]
        sizes = [np.linalg.norm(get_sim_physical_size(sim)) for sim in sims]
        for i in range(n):
            norm_dists = []
            # check if only single z slices
            if is_stack:
                if i + 1 < n:
                    compare_indices = [i + 1]
                else:
                    compare_indices = []
            else:
                compare_indices = range(n)
            for j in compare_indices:
                if not j == i:
                    distance = math.dist(positions[i], positions[j])
                    norm_dist = distance / np.mean([sizes[i], sizes[j]])
                    norm_dists.append(norm_dist)
            if len(norm_dists) > 0:
                norm_dist = min(norm_dists)
                min_dists.append(float(norm_dist))
                if norm_dist >= 1:
                    logging.warning(f'{labels[i]} has no overlap')
                    has_overlaps.append(False)
                elif expect_large_overlap and norm_dist > 0.5:
                    logging.warning(f'{labels[i]} has small overlap')
                    has_overlaps.append(False)
                else:
                    has_overlaps.append(True)
        return min_dists, has_overlaps

    def preprocess(self, sims, params):
        flatfield_quantiles = params.get('flatfield_quantiles')
        normalisation = params.get('normalisation', '')
        filter_foreground = params.get('filter_foreground', False)

        if filter_foreground:
            foreground_map = calc_foreground_map(sims)
        else:
            foreground_map = None
        if flatfield_quantiles is not None:
            logging.info('Flat-field correction...')
            new_sims = [None] * len(sims)
            for sim_indices in group_sims_by_z(sims):
                sims_z_set = [sims[i] for i in sim_indices]
                foreground_map_z_set = [foreground_map[i] for i in sim_indices] if foreground_map is not None else None
                new_sims_z_set = flatfield_correction(sims_z_set, self.source_transform_key, flatfield_quantiles,
                                                      foreground_map=foreground_map_z_set)
                for sim_index, sim in zip(sim_indices, new_sims_z_set):
                    new_sims[sim_index] = sim
            sims = new_sims

        if normalisation:
            use_global = ('global' in normalisation)
            if use_global:
                logging.info('Normalising (global)...')
            else:
                logging.info('Normalising (individual)...')
            new_sims = normalise(sims, self.source_transform_key, use_global=use_global)
        else:
            new_sims = sims

        if filter_foreground:
            logging.info('Filtering foreground images...')
            #tile_vars = np.array([np.asarray(np.std(sim)).item() for sim in sims])
            #threshold1 = np.mean(tile_vars)
            #threshold2 = np.median(tile_vars)
            #threshold3, _ = cv.threshold(np.array(tile_vars).astype(np.uint16), 0, 1, cv.THRESH_OTSU)
            #threshold = min(threshold1, threshold2, threshold3)
            #foregrounds = (tile_vars >= threshold)
            new_sims = [sim for sim, is_foreground in zip(new_sims, foreground_map) if is_foreground]
            logging.info(f'Foreground images: {len(new_sims)} / {len(sims)}')
            indices = np.where(foreground_map)[0]
        else:
            indices = range(len(sims))
        return sims, new_sims, indices

    def register(self, sims, register_sims, indices, params):
        sim0 = sims[0]
        ndims = si_utils.get_ndim_from_sim(sim0)

        operation = params['operation']
        reg_params = params.get('method')
        if isinstance(reg_params, dict):
            reg_method = reg_params.get('name', '').lower()
        else:
            reg_method = reg_params.lower()
        use_orthogonal_pairs = params.get('use_orthogonal_pairs', False)

        is_stack = ('stack' in operation)
        is_3d = ('3d' in operation)
        debug = self.params_general.get('debug', False)

        reg_channel = params.get('channel', 0)
        if isinstance(reg_channel, int):
            reg_channel_index = reg_channel
            reg_channel = None
        else:
            reg_channel_index = None

        groupwise_resolution_kwargs = {
            'transform': params.get('transform_type')  # options include 'translation', 'rigid', 'affine'
        }
        pairwise_reg_func_kwargs = None
        if is_stack and not is_3d:
            # register in 2d; pairwise consecutive views
            register_sims = [si_utils.max_project_sim(sim, dim='z') for sim in register_sims]
            pairs = [(index, index + 1) for index in range(len(register_sims) - 1)]
        elif use_orthogonal_pairs:
            origins = np.array([get_sim_position_final(sim) for sim in register_sims])
            size = get_sim_physical_size(sim0)
            pairs, _ = get_orthogonal_pairs(origins, size)
            logging.info(f'#pairs: {len(pairs)}')
            for pair in pairs:
                logging.info(f'{self.file_labels[pair[0]]} - {self.file_labels[pair[1]]}')
        else:
            pairs = None

        if is_3d:
            overlap_tolerance = {'z': 1}
        else:
            overlap_tolerance = None

        if '3din2d' in reg_method:
            from src.registration_methods.RegistrationMethodANTs3Din2D import RegistrationMethodANTs3Din2D
            registration_method = RegistrationMethodANTs3Din2D(sim0, reg_params, debug)
            pairwise_reg_func = registration_method.registration
        elif 'cpd' in reg_method:
            from src.registration_methods.RegistrationMethodCPD import RegistrationMethodCPD
            registration_method = RegistrationMethodCPD(sim0, reg_params, debug)
            pairwise_reg_func = registration_method.registration
        elif 'feature' in reg_method or 'orb' in reg_method or 'sift' in reg_method:
            if 'cv' in reg_method:
                from src.registration_methods.RegistrationMethodCvFeatures import RegistrationMethodCvFeatures
                registration_method = RegistrationMethodCvFeatures(sim0, reg_params, debug)
            else:
                from src.registration_methods.RegistrationMethodSkFeatures import RegistrationMethodSkFeatures
                registration_method = RegistrationMethodSkFeatures(sim0, reg_params, debug)
            pairwise_reg_func = registration_method.registration
        elif 'ant' in reg_method:
            pairwise_reg_func = registration.registration_ANTsPy
            # kwargs for ANTsPy registration, used internally by the ANTsPy algorithm
            pairwise_reg_func_kwargs = {
                'transform_types': ['Rigid'],
                "aff_random_sampling_rate": 0.5,
                "aff_iterations": (2000, 2000, 1000, 1000),
                "aff_smoothing_sigmas": (4, 2, 1, 0),
                "aff_shrink_factors": (16, 8, 2, 1),
            }
        else:
            pairwise_reg_func = registration.phase_correlation_registration

        # Pass registration through metrics method
        #from src.registration_methods.RegistrationMetrics import RegistrationMetrics
        #registration_metrics = RegistrationMetrics(sim0, pairwise_reg_function)
        #pairwise_reg_function = registration_metrics.registration
        # TODO: extract metrics from registration_metrics

        logging.info(f'Registration method: {reg_method}')

        try:
            logging.info('Registering...')
            register_msims = [msi_utils.get_msim_from_sim(sim) for sim in register_sims]
            reg_result = registration.register(
                register_msims,
                reg_channel=reg_channel,
                reg_channel_index=reg_channel_index,
                transform_key=self.source_transform_key,
                new_transform_key=self.reg_transform_key,

                pairs=pairs,
                pre_registration_pruning_method=None,

                pairwise_reg_func=pairwise_reg_func,
                pairwise_reg_func_kwargs=pairwise_reg_func_kwargs,
                groupwise_resolution_kwargs=groupwise_resolution_kwargs,

                post_registration_do_quality_filter=True,
                post_registration_quality_threshold=0.1,

                plot_summary=self.mpl_ui,
                return_dict=True,

                overlap_tolerance=overlap_tolerance,
            )
            # copy transforms from register sims to unmodified sims
            for reg_msim, index in zip(register_msims, indices):
                si_utils.set_sim_affine(
                    sims[index],
                    msi_utils.get_transform_from_msim(reg_msim, transform_key=self.reg_transform_key),
                    transform_key=self.reg_transform_key)

            # set missing transforms
            for sim in sims:
                if self.reg_transform_key not in si_utils.get_tranform_keys_from_sim(sim):
                    si_utils.set_sim_affine(
                        sim,
                        param_utils.identity_transform(ndim=ndims, t_coords=[0]),
                        transform_key=self.reg_transform_key)

            mappings = reg_result['params']
            # re-index from subset of sims
            residual_error_dict = reg_result.get('groupwise_resolution', {}).get('metrics', {}).get('residuals', {})
            residual_error_dict = {(indices[key[0]], indices[key[1]]): value.item()
                                   for key, value in residual_error_dict.items()}
            registration_qualities_dict = reg_result.get('pairwise_registration', {}).get('metrics', {}).get('qualities', {})
            registration_qualities_dict = {(indices[key[0]], indices[key[1]]): value
                                           for key, value in registration_qualities_dict.items()}
        except NotEnoughOverlapError:
            logging.warning('Not enough overlap')
            reg_result = {}
            mappings = [param_utils.identity_transform(ndim=ndims, t_coords=[0])] * len(sims)
            residual_error_dict = {}
            registration_qualities_dict = {}

        # re-index from subset of sims
        mappings_dict = {index: mapping for index, mapping in zip(indices, mappings)}

        if is_stack:
            # set 3D affine transforms from 2D registration params
            for index, sim in enumerate(sims):
                # check if already 3D
                if 4 not in si_utils.get_affine_from_sim(sim, transform_key=self.reg_transform_key).shape:
                    affine_3d = param_utils.identity_transform(ndim=3)
                    affine_3d.loc[{dim: mappings[index].coords[dim] for dim in mappings[index].sel(t=0).dims}] = mappings[index].sel(t=0)
                    si_utils.set_sim_affine(sim, affine_3d, transform_key=self.reg_transform_key)

        return {'reg_result': reg_result,
                'mappings': mappings_dict,
                'residual_errors': residual_error_dict,
                'registration_qualities': registration_qualities_dict,
                'sims': sims,
                'pairs': pairs}

    def fuse(self, sims, transform_key=None):
        if transform_key is None:
            transform_key = self.reg_transform_key
        operation = self.params['operation']
        extra_metadata = import_metadata(self.params.get('extra_metadata', {}), input_path=self.params['input'])
        channels = extra_metadata.get('channels', [])
        z_scale = extra_metadata.get('scale', {}).get('z')
        if z_scale is None:
            if 'z' in sims[0].dims:
                z_positions = sorted(set([si_utils.get_origin_from_sim(sim).get('z', 0) for sim in sims]))
                if len(z_positions) > 1:
                    # guard against a single unique z origin (np.diff would be empty)
                    z_scale = np.min(np.diff(z_positions))
        if not z_scale:
            z_scale = 1

        is_3d = ('3d' in operation)
        is_channel_overlay = (len(channels) > 1)

        sim0 = sims[0]
        source_type = sim0.dtype

        output_stack_properties = calc_output_properties(sims, transform_key, z_scale=z_scale)
        if is_channel_overlay:
            # convert to multichannel images
            if self.verbose:
                logging.info(f'Output stack: {output_stack_properties}')
            data_size = np.prod(list(output_stack_properties['shape'].values())) * len(sims) * source_type.itemsize
            logging.info(f'Fusing channels {print_hbytes(data_size)}')

            channel_sims = [fusion.fuse(
                [sim],
                transform_key=transform_key,
                output_stack_properties=output_stack_properties
            ) for sim in sims]
            channel_sims = [sim.assign_coords({'c': [channels[simi]['label']]}) for simi, sim in enumerate(channel_sims)]
            fused_image = xr.combine_nested([sim.rename() for sim in channel_sims], concat_dim='c', combine_attrs='override')
        else:
            if is_3d:
                z_positions = sorted(set([si_utils.get_origin_from_sim(sim).get('z', 0) for sim in sims]))
                z_shape = len(z_positions)
                if z_shape <= 1:
                    z_shape = len(sims)
                output_stack_properties['shape']['z'] = z_shape
            if self.verbose:
                logging.info(f'Output stack: {output_stack_properties}')
            data_size = np.prod(list(output_stack_properties['shape'].values())) * source_type.itemsize
            logging.info(f'Fusing {print_hbytes(data_size)}')

            fused_image = fusion.fuse(
                sims,
                transform_key=transform_key,
                output_stack_properties=output_stack_properties,
                fusion_func=fusion.simple_average_fusion,
            )
        return fused_image

    def save_thumbnail(self, output_filename, nom_sims=None, transform_key=None):
        extra_metadata = import_metadata(self.params.get('extra_metadata', {}), input_path=self.params['input'])
        channels = extra_metadata.get('channels', [])
        output_params = self.params_general['output']
        thumbnail_scale = output_params.get('thumbnail_scale', 16)
        sims = self.init_sims(target_scale=thumbnail_scale)[0]

        if nom_sims is not None:
            if sims[0].sizes['x'] >= nom_sims[0].sizes['x']:
                logging.warning('Unable to generate scaled-down thumbnail due to lack of source pyramid sizes')
                return

            if transform_key is not None and transform_key != self.source_transform_key:
                for nom_sim, sim in zip(nom_sims, sims):
                    si_utils.set_sim_affine(sim,
                                            si_utils.get_affine_from_sim(nom_sim, transform_key=transform_key),
                                            transform_key=transform_key)
        fused_image = self.fuse(sims, transform_key=transform_key).squeeze()
        save_image(output_filename, output_params.get('thumbnail'), fused_image, channels=channels,
                   transform_key=transform_key, params=output_params)

    def calc_overlap_metrics(self, results):
        nccs = {}
        ssims = {}
        sims = results['sims']
        pairs = results['pairs']
        if pairs is None:
            origins = np.array([get_sim_position_final(sim) for sim in sims])
            size = get_sim_physical_size(sims[0])
            pairs, _ = get_orthogonal_pairs(origins, size)
        for pair in pairs:
            try:
                # experimental; extracting overlap images may fail
                overlap_sims = self.get_overlap_images((sims[pair[0]], sims[pair[1]]), self.reg_transform_key)
                nccs[pair] = calc_ncc(overlap_sims[0], overlap_sims[1])
                ssims[pair] = calc_ssim(overlap_sims[0], overlap_sims[1])
                #frcs[pair] = calc_frc(overlap_sims[0], overlap_sims[1])
            except Exception as e:
                logging.exception(e)
                #logging.warning(f'Failed to calculate resolution metric')
        return {'ncc': nccs, 'ssim': ssims}

    def get_overlap_images(self, sims, transform_key):
        # functionality copied from registration.register_pair_of_msims()
        spatial_dims = si_utils.get_spatial_dims_from_sim(sims[0])
        overlap_tolerance = {dim: 0.0 for dim in spatial_dims}
        overlap_sims = []
        for sim in sims:
            if 't' in sim.coords.xindexes:
                # work-around for points error in get_overlap_bboxes()
                sim1 = si_utils.sim_sel_coords(sim, {'t': 0})
            else:
                sim1 = sim
            overlap_sims.append(sim1)
        lowers, uppers = get_overlap_bboxes(
            overlap_sims[0],
            overlap_sims[1],
            input_transform_key=transform_key,
            output_transform_key=None,
            overlap_tolerance=overlap_tolerance,
        )

        reg_sims_spacing = [
            si_utils.get_spacing_from_sim(sim) for sim in sims
        ]

        tol = 1e-6
        overlaps_sims = [
            sim.sel(
                {
                    # add spacing to include bounding pixels
                    dim: slice(
                        lowers[isim][idim] - tol - reg_sims_spacing[isim][dim],
                        uppers[isim][idim] + tol + reg_sims_spacing[isim][dim],
                    )
                    for idim, dim in enumerate(spatial_dims)
                },
            )
            for isim, sim in enumerate(sims)
        ]
        overlaps_sims = [sim.squeeze() for sim in overlaps_sims]
        return overlaps_sims

    def calc_metrics(self, results, labels):
        mappings0 = results['mappings']
        mappings = {labels[index]: mapping.data[0].tolist() for index, mapping in mappings0.items()}

        distances = [np.linalg.norm(param_utils.translation_from_affine(mapping.data[0]))
                     for mapping in mappings0.values()]
        if len(distances) > 2:
            # Coefficient of variation
            cvar = np.std(distances) / np.mean(distances)
            var = cvar
        else:
            size = get_sim_physical_size(results['sims'][0])
            norm_distance = np.sum(distances) / np.linalg.norm(size)
            var = norm_distance

        residual_errors = {labels[key[0]] + ' - ' + labels[key[1]]: value
                           for key, value in results['residual_errors'].items()}
        if len(residual_errors) > 0:
            residual_error = np.nanmean(list(residual_errors.values()))
        else:
            residual_error = 1

        registration_qualities = {labels[key[0]] + ' - ' + labels[key[1]]: value.item()
                                  for key, value in results['registration_qualities'].items()}
        if len(registration_qualities) > 0:
            registration_quality = np.nanmean(list(registration_qualities.values()))
        else:
            registration_quality = 0

        #overlap_metrics = self.calc_overlap_metrics(results)

        #nccs = {labels[key[0]] + ' - ' + labels[key[1]]: value
        #         for key, value in overlap_metrics['ncc'].items()}
        #ncc = np.nanmean(list(nccs.values()))

        #ssims = {labels[key[0]] + ' - ' + labels[key[1]]: value
        #         for key, value in overlap_metrics['ssim'].items()}
        #ssim = np.nanmean(list(ssims.values()))

        summary = (f'Residual error: {residual_error:.3f}'
                   f' Registration quality: {registration_quality:.3f}'
        #           f' NCC: {ncc:.3f}'
        #           f' SSIM: {ssim:.3f}'
                   f' Variation: {var:.3f}')

        return {'mappings': mappings,
                'variation': var,
                'residual_error': residual_error,
                'residual_errors': residual_errors,
                'registration_quality': registration_quality,
                'registration_qualities': registration_qualities,
         #       'ncc': ncc,
         #       'nccs': nccs,
         #       'ssim': ssim,
         #       'ssims': ssims,
                'summary': summary}

    def save_video(self, output, sims, fused_image):
        logging.info('Creating transition video...')
        pixel_size = [si_utils.get_spacing_from_sim(sims[0]).get(dim, 1) for dim in 'xy']
        params = self.params
        nframes = params.get('frames', 1)
        spacing = params.get('spacing', [1.1, 1])
        scale = params.get('scale', 1)
        transition_filename = output + 'transition'
        video = Video(transition_filename + '.mp4', fps=params.get('fps', 1))
        positions0 = np.array([si_utils.get_origin_from_sim(sim, asarray=True) for sim in sims])
        center = np.mean(positions0, 0)
        window = get_image_window(fused_image)

        max_size = None
        acum = 0
        for framei in range(nframes):
            c = (1 - np.cos(framei / max(nframes - 1, 1) * 2 * math.pi)) / 2  # avoid division by zero when nframes == 1
            acum += c / (nframes / 2)
            spacing1 = spacing[0] + (spacing[1] - spacing[0]) * acum
            for sim, position0 in zip(sims, positions0):
                transform = param_utils.identity_transform(ndim=2, t_coords=[0])
                transform[0][:2, 2] += (position0 - center) * spacing1
                si_utils.set_sim_affine(sim, transform, transform_key=self.transition_transform_key)
            frame = fusion.fuse(sims, transform_key=self.transition_transform_key).squeeze()
            frame = float2int_image(normalise_values(frame, window[0], window[1]))
            frame = cv.resize(np.asarray(frame), None, fx=scale, fy=scale)
            if max_size is None:
                max_size = frame.shape[1], frame.shape[0]
                video.size = max_size
            frame = image_reshape(frame, max_size)
            save_tiff(transition_filename + f'{framei:04d}.tiff', frame, None, pixel_size)
            video.write(frame)

        video.close()
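
A minimal usage sketch, assuming a params_general dict shaped like the keys read in __init__ and a per-fileset params dict shaped like the keys read in _run_operation; the file names, output pattern, and values are illustrative only:

reg = MVSRegistration({'ui': 'mpl', 'logging': {'time': True}, 'output': {'overwrite': True}})
params = {
    'operation': 'register',
    'input': 'tiles/',
    'output': 'stitched/',   # pattern is formatted with split_numeric_dict(filenames[0])
    'method': 'phase',       # falls through to phase correlation registration
    'channel': 0,
}
ok = reg.run_operation('fileset0', ['tiles/tile0.tiff', 'tiles/tile1.tiff'], params)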

logging_dask = params_logging.get('dask', False) instance-attribute

logging_time = params_logging.get('time', False) instance-attribute

mpl_ui = 'mpl' in self.ui or 'plot' in self.ui instance-attribute

napari_ui = 'napari' in self.ui instance-attribute

params_general = params_general instance-attribute

reg_transform_key = 'registered' instance-attribute

source_transform_key = 'source_metadata' instance-attribute

transition_transform_key = 'transition' instance-attribute

ui = self.params_general.get('ui', '') instance-attribute

verbose = params_logging.get('verbose', False) instance-attribute

__init__(params_general)

Source code in src\MVSRegistration.py
def __init__(self, params_general):
    super().__init__()
    self.params_general = params_general

    params_logging = self.params_general.get('logging', {})
    self.verbose = params_logging.get('verbose', False)
    self.logging_dask = params_logging.get('dask', False)
    self.logging_time = params_logging.get('time', False)
    self.ui = self.params_general.get('ui', '')
    self.mpl_ui = ('mpl' in self.ui or 'plot' in self.ui)
    self.napari_ui = ('napari' in self.ui)
    self.source_transform_key = 'source_metadata'
    self.reg_transform_key = 'registered'
    self.transition_transform_key = 'transition'

    logging.info(f'Multiview-stitcher version: {multiview_stitcher.__version__}')
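
__init__ reads only a handful of keys; a sketch of the corresponding params_general structure, with illustrative values:

params_general = {
    'logging': {
        'verbose': False,   # extra output-stack logging in fuse()
        'dask': False,      # wrap runs in a dask ProgressBar
        'time': False,      # per-step Timer logging
    },
    'ui': 'napari mpl',     # substring checks toggle napari_ui and mpl_ui
}
reg = MVSRegistration(params_general)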

calc_metrics(results, labels)

Source code in src\MVSRegistration.py
def calc_metrics(self, results, labels):
    mappings0 = results['mappings']
    mappings = {labels[index]: mapping.data[0].tolist() for index, mapping in mappings0.items()}

    distances = [np.linalg.norm(param_utils.translation_from_affine(mapping.data[0]))
                 for mapping in mappings0.values()]
    if len(distances) > 2:
        # Coefficient of variation
        cvar = np.std(distances) / np.mean(distances)
        var = cvar
    else:
        size = get_sim_physical_size(results['sims'][0])
        norm_distance = np.sum(distances) / np.linalg.norm(size)
        var = norm_distance

    residual_errors = {labels[key[0]] + ' - ' + labels[key[1]]: value
                       for key, value in results['residual_errors'].items()}
    if len(residual_errors) > 0:
        residual_error = np.nanmean(list(residual_errors.values()))
    else:
        residual_error = 1

    registration_qualities = {labels[key[0]] + ' - ' + labels[key[1]]: value.item()
                              for key, value in results['registration_qualities'].items()}
    if len(registration_qualities) > 0:
        registration_quality = np.nanmean(list(registration_qualities.values()))
    else:
        registration_quality = 0

    #overlap_metrics = self.calc_overlap_metrics(results)

    #nccs = {labels[key[0]] + ' - ' + labels[key[1]]: value
    #         for key, value in overlap_metrics['ncc'].items()}
    #ncc = np.nanmean(list(nccs.values()))

    #ssims = {labels[key[0]] + ' - ' + labels[key[1]]: value
    #         for key, value in overlap_metrics['ssim'].items()}
    #ssim = np.nanmean(list(ssims.values()))

    summary = (f'Residual error: {residual_error:.3f}'
               f' Registration quality: {registration_quality:.3f}'
    #           f' NCC: {ncc:.3f}'
    #           f' SSIM: {ssim:.3f}'
               f' Variation: {var:.3f}')

    return {'mappings': mappings,
            'variation': var,
            'residual_error': residual_error,
            'residual_errors': residual_errors,
            'registration_quality': registration_quality,
            'registration_qualities': registration_qualities,
     #       'ncc': ncc,
     #       'nccs': nccs,
     #       'ssim': ssim,
     #       'ssims': ssims,
            'summary': summary}
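
With more than two tiles, the 'variation' metric is the coefficient of variation of the per-tile translation magnitudes; a short worked sketch:

import numpy as np

distances = [10.0, 12.0, 11.0]                 # translation magnitudes per tile
cvar = np.std(distances) / np.mean(distances)  # ~0.074: corrections are consistent
print(f'Variation: {cvar:.3f}')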

calc_overlap_metrics(results)

Source code in src\MVSRegistration.py
def calc_overlap_metrics(self, results):
    nccs = {}
    ssims = {}
    sims = results['sims']
    pairs = results['pairs']
    if pairs is None:
        origins = np.array([get_sim_position_final(sim) for sim in sims])
        size = get_sim_physical_size(sims[0])
        pairs, _ = get_orthogonal_pairs(origins, size)
    for pair in pairs:
        try:
            # experimental; extracting overlap images may fail
            overlap_sims = self.get_overlap_images((sims[pair[0]], sims[pair[1]]), self.reg_transform_key)
            nccs[pair] = calc_ncc(overlap_sims[0], overlap_sims[1])
            ssims[pair] = calc_ssim(overlap_sims[0], overlap_sims[1])
            #frcs[pair] = calc_frc(overlap_sims[0], overlap_sims[1])
        except Exception as e:
            logging.exception(e)
            #logging.warning(f'Failed to calculate resolution metric')
    return {'ncc': nccs, 'ssim': ssims}
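
calc_ncc and calc_ssim are project helpers whose sources are not shown here; for orientation, a common definition of normalised cross-correlation, which is an assumption about what calc_ncc computes rather than its actual implementation:

import numpy as np

def ncc(a, b):
    # normalised cross-correlation of two equally shaped arrays
    a = np.asarray(a, dtype=float).ravel()
    b = np.asarray(b, dtype=float).ravel()
    a = a - a.mean()
    b = b - b.mean()
    denom = np.linalg.norm(a) * np.linalg.norm(b)
    return float(a @ b / denom) if denom else 0.0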

fuse(sims, transform_key=None)

Source code in src\MVSRegistration.py
def fuse(self, sims, transform_key=None):
    if transform_key is None:
        transform_key = self.reg_transform_key
    operation = self.params['operation']
    extra_metadata = import_metadata(self.params.get('extra_metadata', {}), input_path=self.params['input'])
    channels = extra_metadata.get('channels', [])
    z_scale = extra_metadata.get('scale', {}).get('z')
    if z_scale is None:
        if 'z' in sims[0].dims:
            z_positions = sorted(set([si_utils.get_origin_from_sim(sim).get('z', 0) for sim in sims]))
            if len(z_positions) > 1:
                # guard against a single unique z origin (np.diff would be empty)
                z_scale = np.min(np.diff(z_positions))
    if not z_scale:
        z_scale = 1

    is_3d = ('3d' in operation)
    is_channel_overlay = (len(channels) > 1)

    sim0 = sims[0]
    source_type = sim0.dtype

    output_stack_properties = calc_output_properties(sims, transform_key, z_scale=z_scale)
    if is_channel_overlay:
        # convert to multichannel images
        if self.verbose:
            logging.info(f'Output stack: {output_stack_properties}')
        data_size = np.prod(list(output_stack_properties['shape'].values())) * len(sims) * source_type.itemsize
        logging.info(f'Fusing channels {print_hbytes(data_size)}')

        channel_sims = [fusion.fuse(
            [sim],
            transform_key=transform_key,
            output_stack_properties=output_stack_properties
        ) for sim in sims]
        channel_sims = [sim.assign_coords({'c': [channels[simi]['label']]}) for simi, sim in enumerate(channel_sims)]
        fused_image = xr.combine_nested([sim.rename() for sim in channel_sims], concat_dim='c', combine_attrs='override')
    else:
        if is_3d:
            z_positions = sorted(set([si_utils.get_origin_from_sim(sim).get('z', 0) for sim in sims]))
            z_shape = len(z_positions)
            if z_shape <= 1:
                z_shape = len(sims)
            output_stack_properties['shape']['z'] = z_shape
        if self.verbose:
            logging.info(f'Output stack: {output_stack_properties}')
        data_size = np.prod(list(output_stack_properties['shape'].values())) * source_type.itemsize
        logging.info(f'Fusing {print_hbytes(data_size)}')

        fused_image = fusion.fuse(
            sims,
            transform_key=transform_key,
            output_stack_properties=output_stack_properties,
            fusion_func=fusion.simple_average_fusion,
        )
    return fused_image
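
fuse() defaults to the registered transform key; passing the source key reproduces the pre-registration mosaic, which is exactly what _run_operation does for the 'original' export. Given an MVSRegistration instance reg and the sims returned by init_sims:

fused_registered = reg.fuse(sims)                                         # uses reg_transform_key
fused_original = reg.fuse(sims, transform_key=reg.source_transform_key)   # metadata positions only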

get_overlap_images(sims, transform_key)

Source code in src\MVSRegistration.py
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
def get_overlap_images(self, sims, transform_key):
    # functionality copied from registration.register_pair_of_msims()
    spatial_dims = si_utils.get_spatial_dims_from_sim(sims[0])
    overlap_tolerance = {dim: 0.0 for dim in spatial_dims}
    overlap_sims = []
    for sim in sims:
        if 't' in sim.coords.xindexes:
            # work-around for points error in get_overlap_bboxes()
            sim1 = si_utils.sim_sel_coords(sim, {'t': 0})
        else:
            sim1 = sim
        overlap_sims.append(sim1)
    lowers, uppers = get_overlap_bboxes(
        overlap_sims[0],
        overlap_sims[1],
        input_transform_key=transform_key,
        output_transform_key=None,
        overlap_tolerance=overlap_tolerance,
    )

    reg_sims_spacing = [
        si_utils.get_spacing_from_sim(sim) for sim in sims
    ]

    tol = 1e-6
    overlaps_sims = [
        sim.sel(
            {
                # add spacing to include bounding pixels
                dim: slice(
                    lowers[isim][idim] - tol - reg_sims_spacing[isim][dim],
                    uppers[isim][idim] + tol + reg_sims_spacing[isim][dim],
                )
                for idim, dim in enumerate(spatial_dims)
            },
        )
        for isim, sim in enumerate(sims)
    ]
    overlaps_sims = [sim.squeeze() for sim in overlaps_sims]
    return overlaps_sims

init_sims(target_scale=None)

Source code in src\MVSRegistration.py
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
def init_sims(self, target_scale=None):
    operation = self.params['operation']
    source_metadata = import_metadata(self.params.get('source_metadata', 'source'), input_path=self.params['input'])
    chunk_size = self.params_general.get('chunk_size', [1024, 1024])
    extra_metadata = import_metadata(self.params.get('extra_metadata', {}), input_path=self.params['input'])
    z_scale = extra_metadata.get('scale', {}).get('z')

    logging.info('Initialising sims...')
    sources = [create_dask_source(file, source_metadata) for file in self.filenames]
    source0 = sources[0]
    images = []
    sims = []
    scales = []
    translations = []
    rotations = []

    is_stack = ('stack' in operation)
    has_z_size = (source0.get_size().get('z', 0) > 0)
    is_3d = (has_z_size or '3d' in operation)
    pyramid_level = 0

    output_order = 'zyx' if is_stack or is_3d else 'yx'
    ndims = len(output_order)
    if source0.get_nchannels() > 1:
        output_order += 'c'

    last_z_position = None
    different_z_positions = False
    delta_zs = []
    for filename, source in zip(self.filenames, sources):
        scale = source.get_pixel_size()
        translation = source.get_position()
        rotation = source.get_rotation()

        if target_scale:
            pyramid_level = np.argmin(abs(np.array(source.scales) - target_scale))
            pyramid_scale = source.scales[pyramid_level]
            scale = {dim: size * pyramid_scale if dim in 'xy' else size for dim, size in scale.items()}
        if 'invert' in source_metadata:
            translation[0] = -translation[0]
            translation[1] = -translation[1]
        if len(translation) >= 3:
            z_position = translation['z']
        else:
            z_position = 0
        if last_z_position is not None and z_position != last_z_position:
            different_z_positions = True
            delta_zs.append(z_position - last_z_position)
        if self.global_rotation is not None:
            rotation = self.global_rotation

        dask_data = source.get_data(level=pyramid_level)
        image = redimension_data(dask_data, source.dimension_order, output_order)

        scales.append(scale)
        translations.append(translation)
        rotations.append(rotation)
        images.append(image)
        last_z_position = z_position

    if z_scale is None:
        if len(delta_zs) > 0:
            z_scale = np.min(delta_zs)
        else:
            z_scale = 1

    if 'norm' in source_metadata:
        size = np.array(source0.get_size()) * source0.get_pixel_size_micrometer()
        center = None
        if 'center' in source_metadata:
            if 'global' in source_metadata:
                center = self.global_center
            else:
                center = np.mean(translations, 0)
        elif 'origin' in source_metadata:
            center = np.zeros(ndims)
        translations, rotations = normalise_rotated_positions(translations, rotations, size, center)

    #translations = [np.array(translation) * 1.25 for translation in translations]

    increase_z_positions = is_stack and not different_z_positions
    z_position = 0
    scales2 = []
    translations2 = []
    for source, image, scale, translation, rotation, file_label in zip(sources, images, scales, translations, rotations, self.file_labels):
        # the number of transform dimensions needs to match
        if len(scale) > 0 and 'z' not in scale:
            scale['z'] = abs(z_scale)
        if (len(translation) > 0 and 'z' not in translation) or increase_z_positions:
            translation['z'] = z_position
        if increase_z_positions:
            z_position += z_scale
        channel_labels = [channel.get('label', '') for channel in source.get_channels()]
        if rotation is None or 'norm' in source_metadata:
            # if positions are normalised, don't use rotation
            transform = None
        else:
            transform = param_utils.invert_coordinate_order(
                create_transform(translation, rotation, matrix_size=ndims + 1)
            )
        if file_label in extra_metadata:
            transform2 = extra_metadata[file_label]
            if transform is None:
                transform = np.array(transform2)
            else:
                transform = np.array(combine_transforms([transform, transform2]))
        sim = si_utils.get_sim_from_array(
            image,
            dims=list(output_order),
            scale=scale,
            translation=translation,
            affine=transform,
            transform_key=self.source_transform_key,
            c_coords=channel_labels
        )
        if len(sim.chunksizes.get('x')) == 1 and len(sim.chunksizes.get('y')) == 1:
            sim = sim.chunk(xyz_to_dict(chunk_size))
        sims.append(sim)
        scales2.append(dict_to_xyz(scale))
        translations2.append(dict_to_xyz(translation))
    return sims, scales2, translations2, rotations
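
init_sims branches on substrings of source_metadata ('invert', 'norm', 'center', 'global', 'origin'); a configuration sketch exercising the normalisation path, with the caveat that the exact schema accepted by import_metadata is not shown in this reference:

params = {
    'input': 'tiles/',
    'operation': 'register',
    'source_metadata': 'norm center',          # normalise rotated positions around the mean position
    'extra_metadata': {'scale': {'z': 2.0}},   # overrides the z spacing otherwise inferred from delta_zs
}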

preprocess(sims, params)

Source code in src\MVSRegistration.py
def preprocess(self, sims, params):
    flatfield_quantiles = params.get('flatfield_quantiles')
    normalisation = params.get('normalisation', '')
    filter_foreground = params.get('filter_foreground', False)

    if filter_foreground:
        foreground_map = calc_foreground_map(sims)
    else:
        foreground_map = None
    if flatfield_quantiles is not None:
        logging.info('Flat-field correction...')
        new_sims = [None] * len(sims)
        for sim_indices in group_sims_by_z(sims):
            sims_z_set = [sims[i] for i in sim_indices]
            foreground_map_z_set = [foreground_map[i] for i in sim_indices] if foreground_map is not None else None
            new_sims_z_set = flatfield_correction(sims_z_set, self.source_transform_key, flatfield_quantiles,
                                                  foreground_map=foreground_map_z_set)
            for sim_index, sim in zip(sim_indices, new_sims_z_set):
                new_sims[sim_index] = sim
        sims = new_sims

    if normalisation:
        use_global = ('global' in normalisation)
        if use_global:
            logging.info('Normalising (global)...')
        else:
            logging.info('Normalising (individual)...')
        new_sims = normalise(sims, self.source_transform_key, use_global=use_global)
    else:
        new_sims = sims

    if filter_foreground:
        logging.info('Filtering foreground images...')
        #tile_vars = np.array([np.asarray(np.std(sim)).item() for sim in sims])
        #threshold1 = np.mean(tile_vars)
        #threshold2 = np.median(tile_vars)
        #threshold3, _ = cv.threshold(np.array(tile_vars).astype(np.uint16), 0, 1, cv.THRESH_OTSU)
        #threshold = min(threshold1, threshold2, threshold3)
        #foregrounds = (tile_vars >= threshold)
        new_sims = [sim for sim, is_foreground in zip(new_sims, foreground_map) if is_foreground]
        logging.info(f'Foreground images: {len(new_sims)} / {len(sims)}')
        indices = np.where(foreground_map)[0]
    else:
        indices = range(len(sims))
    return sims, new_sims, indices

register(sims, register_sims, indices, params)

Source code in src\MVSRegistration.py
def register(self, sims, register_sims, indices, params):
    sim0 = sims[0]
    ndims = si_utils.get_ndim_from_sim(sim0)

    operation = params['operation']
    reg_params = params.get('method')
    if isinstance(reg_params, dict):
        reg_method = reg_params.get('name', '').lower()
    else:
        reg_method = reg_params.lower()
    use_orthogonal_pairs = params.get('use_orthogonal_pairs', False)

    is_stack = ('stack' in operation)
    is_3d = ('3d' in operation)
    debug = self.params_general.get('debug', False)

    reg_channel = params.get('channel', 0)
    if isinstance(reg_channel, int):
        reg_channel_index = reg_channel
        reg_channel = None
    else:
        reg_channel_index = None

    groupwise_resolution_kwargs = {
        'transform': params.get('transform_type')  # options include 'translation', 'rigid', 'affine'
    }
    pairwise_reg_func_kwargs = None
    if is_stack and not is_3d:
        # register in 2d; pairwise consecutive views
        register_sims = [si_utils.max_project_sim(sim, dim='z') for sim in register_sims]
        pairs = [(index, index + 1) for index in range(len(register_sims) - 1)]
    elif use_orthogonal_pairs:
        origins = np.array([get_sim_position_final(sim) for sim in register_sims])
        size = get_sim_physical_size(sim0)
        pairs, _ = get_orthogonal_pairs(origins, size)
        logging.info(f'#pairs: {len(pairs)}')
        for pair in pairs:
            logging.info(f'{self.file_labels[pair[0]]} - {self.file_labels[pair[1]]}')
    else:
        pairs = None

    if is_3d:
        overlap_tolerance = {'z': 1}
    else:
        overlap_tolerance = None

    if '3din2d' in reg_method:
        from src.registration_methods.RegistrationMethodANTs3Din2D import RegistrationMethodANTs3Din2D
        registration_method = RegistrationMethodANTs3Din2D(sim0, reg_params, debug)
        pairwise_reg_func = registration_method.registration
    elif 'cpd' in reg_method:
        from src.registration_methods.RegistrationMethodCPD import RegistrationMethodCPD
        registration_method = RegistrationMethodCPD(sim0, reg_params, debug)
        pairwise_reg_func = registration_method.registration
    elif 'feature' in reg_method or 'orb' in reg_method or 'sift' in reg_method:
        if 'cv' in reg_method:
            from src.registration_methods.RegistrationMethodCvFeatures import RegistrationMethodCvFeatures
            registration_method = RegistrationMethodCvFeatures(sim0, reg_params, debug)
        else:
            from src.registration_methods.RegistrationMethodSkFeatures import RegistrationMethodSkFeatures
            registration_method = RegistrationMethodSkFeatures(sim0, reg_params, debug)
        pairwise_reg_func = registration_method.registration
    elif 'ant' in reg_method:
        pairwise_reg_func = registration.registration_ANTsPy
        # args for ANTsPy registration, used internally by the ANTsPy algorithm
        pairwise_reg_func_kwargs = {
            'transform_types': ['Rigid'],
            "aff_random_sampling_rate": 0.5,
            "aff_iterations": (2000, 2000, 1000, 1000),
            "aff_smoothing_sigmas": (4, 2, 1, 0),
            "aff_shrink_factors": (16, 8, 2, 1),
        }
    else:
        pairwise_reg_func = registration.phase_correlation_registration

    # Pass registration through metrics method
    #from src.registration_methods.RegistrationMetrics import RegistrationMetrics
    #registration_metrics = RegistrationMetrics(sim0, pairwise_reg_func)
    #pairwise_reg_func = registration_metrics.registration
    # TODO: extract metrics from registration_metrics

    logging.info(f'Registration method: {reg_method}')

    try:
        logging.info('Registering...')
        register_msims = [msi_utils.get_msim_from_sim(sim) for sim in register_sims]
        reg_result = registration.register(
            register_msims,
            reg_channel=reg_channel,
            reg_channel_index=reg_channel_index,
            transform_key=self.source_transform_key,
            new_transform_key=self.reg_transform_key,

            pairs=pairs,
            pre_registration_pruning_method=None,

            pairwise_reg_func=pairwise_reg_func,
            pairwise_reg_func_kwargs=pairwise_reg_func_kwargs,
            groupwise_resolution_kwargs=groupwise_resolution_kwargs,

            post_registration_do_quality_filter=True,
            post_registration_quality_threshold=0.1,

            plot_summary=self.mpl_ui,
            return_dict=True,

            overlap_tolerance=overlap_tolerance,
        )
        # copy transforms from register sims to unmodified sims
        for reg_msim, index in zip(register_msims, indices):
            si_utils.set_sim_affine(
                sims[index],
                msi_utils.get_transform_from_msim(reg_msim, transform_key=self.reg_transform_key),
                transform_key=self.reg_transform_key)

        # set missing transforms
        for sim in sims:
            if self.reg_transform_key not in si_utils.get_tranform_keys_from_sim(sim):
                si_utils.set_sim_affine(
                    sim,
                    param_utils.identity_transform(ndim=ndims, t_coords=[0]),
                    transform_key=self.reg_transform_key)

        mappings = reg_result['params']
        # re-index from subset of sims
        residual_error_dict = reg_result.get('groupwise_resolution', {}).get('metrics', {}).get('residuals', {})
        residual_error_dict = {(indices[key[0]], indices[key[1]]): value.item()
                               for key, value in residual_error_dict.items()}
        registration_qualities_dict = reg_result.get('pairwise_registration', {}).get('metrics', {}).get('qualities', {})
        registration_qualities_dict = {(indices[key[0]], indices[key[1]]): value
                                       for key, value in registration_qualities_dict.items()}
    except NotEnoughOverlapError:
        logging.warning('Not enough overlap')
        reg_result = {}
        mappings = [param_utils.identity_transform(ndim=ndims, t_coords=[0])] * len(sims)
        residual_error_dict = {}
        registration_qualities_dict = {}

    # re-index from subset of sims
    mappings_dict = {index: mapping for index, mapping in zip(indices, mappings)}

    if is_stack:
        # set 3D affine transforms from 2D registration params
        for index, sim in enumerate(sims):
            # check if already 3D
            if 4 not in si_utils.get_affine_from_sim(sim, transform_key=self.reg_transform_key).shape:
                affine_3d = param_utils.identity_transform(ndim=3)
                affine_3d.loc[{dim: mappings[index].coords[dim] for dim in mappings[index].sel(t=0).dims}] = mappings[index].sel(t=0)
                si_utils.set_sim_affine(sim, affine_3d, transform_key=self.reg_transform_key)

    return {'reg_result': reg_result,
            'mappings': mappings_dict,
            'residual_errors': residual_error_dict,
            'registration_qualities': registration_qualities_dict,
            'sims': sims,
            'pairs': pairs}

run_operation(fileset_label, filenames, params, global_rotation=None, global_center=None)

Source code in src\MVSRegistration.py
def run_operation(self, fileset_label, filenames, params, global_rotation=None, global_center=None):
    self.fileset_label = fileset_label
    self.filenames = filenames
    self.file_labels = get_unique_file_labels(filenames)
    self.params = params
    self.global_rotation = global_rotation
    self.global_center = global_center

    input_dir = os.path.dirname(filenames[0])
    parts = split_numeric_dict(filenames[0])
    output_pattern = params['output'].format_map(parts)
    self.output = os.path.join(input_dir, output_pattern)    # preserve trailing slash: do not use os.path.normpath()

    with ProgressBar(minimum=10, dt=1) if self.logging_dask else nullcontext():
        return self._run_operation()

save_thumbnail(output_filename, nom_sims=None, transform_key=None)

Source code in src\MVSRegistration.py
def save_thumbnail(self, output_filename, nom_sims=None, transform_key=None):
    extra_metadata = import_metadata(self.params.get('extra_metadata', {}), input_path=self.params['input'])
    channels = extra_metadata.get('channels', [])
    output_params = self.params_general['output']
    thumbnail_scale = output_params.get('thumbnail_scale', 16)
    sims = self.init_sims(target_scale=thumbnail_scale)[0]

    if nom_sims is not None:
        if sims[0].sizes['x'] >= nom_sims[0].sizes['x']:
            logging.warning('Unable to generate scaled-down thumbnail: no smaller source pyramid level available')
            return

        if transform_key is not None and transform_key != self.source_transform_key:
            for nom_sim, sim in zip(nom_sims, sims):
                si_utils.set_sim_affine(sim,
                                        si_utils.get_affine_from_sim(nom_sim, transform_key=transform_key),
                                        transform_key=transform_key)
    fused_image = self.fuse(sims, transform_key=transform_key).squeeze()
    save_image(output_filename, output_params.get('thumbnail'), fused_image, channels=channels,
               transform_key=transform_key, params=output_params)

save_video(output, sims, fused_image)

Source code in src\MVSRegistration.py
def save_video(self, output, sims, fused_image):
    logging.info('Creating transition video...')
    pixel_size = [si_utils.get_spacing_from_sim(sims[0]).get(dim, 1) for dim in 'xy']
    params = self.params
    nframes = params.get('frames', 1)
    spacing = params.get('spacing', [1.1, 1])
    scale = params.get('scale', 1)
    transition_filename = output + 'transition'
    video = Video(transition_filename + '.mp4', fps=params.get('fps', 1))
    positions0 = np.array([si_utils.get_origin_from_sim(sim, asarray=True) for sim in sims])
    center = np.mean(positions0, 0)
    window = get_image_window(fused_image)

    max_size = None
    acum = 0
    for framei in range(nframes):
        c = (1 - np.cos(framei / max(nframes - 1, 1) * 2 * math.pi)) / 2   # max() guards against division by zero when nframes == 1
        acum += c / (nframes / 2)
        spacing1 = spacing[0] + (spacing[1] - spacing[0]) * acum
        for sim, position0 in zip(sims, positions0):
            transform = param_utils.identity_transform(ndim=2, t_coords=[0])
            transform[0][:2, 2] += (position0 - center) * spacing1
            si_utils.set_sim_affine(sim, transform, transform_key=self.transition_transform_key)
        frame = fusion.fuse(sims, transform_key=self.transition_transform_key).squeeze()
        frame = float2int_image(normalise_values(frame, window[0], window[1]))
        frame = cv.resize(np.asarray(frame), None, fx=scale, fy=scale)
        if max_size is None:
            max_size = frame.shape[1], frame.shape[0]
            video.size = max_size
        frame = image_reshape(frame, max_size)
        save_tiff(transition_filename + f'{framei:04d}.tiff', frame, None, pixel_size)
        video.write(frame)

    video.close()

validate_overlap(sims, labels, is_stack=False, expect_large_overlap=False)

Source code in src\MVSRegistration.py
def validate_overlap(self, sims, labels, is_stack=False, expect_large_overlap=False):
    min_dists = []
    has_overlaps = []
    n = len(sims)
    positions = [get_sim_position_final(sim) for sim in sims]
    sizes = [np.linalg.norm(get_sim_physical_size(sim)) for sim in sims]
    for i in range(n):
        norm_dists = []
        # check if only single z slices
        if is_stack:
            if i + 1 < n:
                compare_indices = [i + 1]
            else:
                compare_indices = []
        else:
            compare_indices = range(n)
        for j in compare_indices:
            if not j == i:
                distance = math.dist(positions[i], positions[j])
                norm_dist = distance / np.mean([sizes[i], sizes[j]])
                norm_dists.append(norm_dist)
        if len(norm_dists) > 0:
            norm_dist = min(norm_dists)
            min_dists.append(float(norm_dist))
            if norm_dist >= 1:
                logging.warning(f'{labels[i]} has no overlap')
                has_overlaps.append(False)
            elif expect_large_overlap and norm_dist > 0.5:
                logging.warning(f'{labels[i]} has small overlap')
                has_overlaps.append(False)
            else:
                has_overlaps.append(True)
    return min_dists, has_overlaps

apply_transform(points, transform)

Source code in src\util.py
def apply_transform(points, transform):
    new_points = []
    for point in points:
        point_len = len(point)
        while len(point) < len(transform):
            point = list(point) + [1]
        new_point = np.dot(point, np.transpose(np.array(transform)))
        new_points.append(new_point[:point_len])
    return new_points
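
Example (a minimal sketch, assuming the module is importable as src.util): points are padded with a homogeneous coordinate until they match the transform size, so 2D points can be pushed through a 3x3 matrix directly.

import numpy as np
from src.util import apply_transform

# translate (x, y) points by (10, 5) using a 3x3 homogeneous matrix
transform = np.array([[1., 0., 10.],
                      [0., 1., 5.],
                      [0., 0., 1.]])
apply_transform([(2, 3)], transform)   # -> [array([12., 8.])]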

blur_image(image, sigma)

Source code in src\image\util.py
def blur_image(image, sigma):
    nchannels = image.shape[2] if image.ndim == 3 else 1
    if nchannels not in [1, 3]:
        new_image = np.zeros_like(image)
        for channeli in range(nchannels):
            new_image[..., channeli] = blur_image_single(image[..., channeli], sigma)
    else:
        new_image = blur_image_single(image, sigma)
    return new_image

blur_image_single(image, sigma)

Source code in src\image\util.py
def blur_image_single(image, sigma):
    return gaussian_filter(image, sigma)

calc_foreground_map(sims)

Source code in src\image\util.py
def calc_foreground_map(sims):
    if len(sims) <= 2:
        return [True] * len(sims)
    sims = [sim.squeeze().astype(np.float32) for sim in sims]
    median_image = calc_images_median(sims).astype(np.float32)
    difs = [np.mean(np.abs(sim - median_image), (0, 1)) for sim in sims]
    # or use stddev instead of mean?
    threshold = np.mean(difs, 0)
    #threshold, _ = cv.threshold(np.array(difs).astype(np.uint16), 0, 1, cv.THRESH_OTSU)
    #threshold, foregrounds = filter_noise_images(channel_images)
    foreground_map = (difs > threshold)
    if not np.any(foreground_map):
        return [True] * len(sims)
    return foreground_map

calc_images_median(images)

Source code in src\image\util.py
def calc_images_median(images):
    out_image = np.zeros(shape=images[0].shape, dtype=images[0].dtype)
    median_image = np.median(images, 0, out_image)
    return median_image

calc_images_quantiles(images, quantiles)

Source code in src\image\util.py
def calc_images_quantiles(images, quantiles):
    quantile_images = [image.astype(np.float32) for image in np.quantile(images, quantiles, 0)]
    return quantile_images

calc_output_properties(sims, transform_key, z_scale=None)

Source code in src\image\util.py
def calc_output_properties(sims, transform_key, z_scale=None):
    output_spacing = si_utils.get_spacing_from_sim(sims[0])
    if z_scale is not None:
        output_spacing['z'] = z_scale
    output_properties = fusion.calc_fusion_stack_properties(
        sims,
        [si_utils.get_affine_from_sim(sim, transform_key) for sim in sims],
        output_spacing,
        mode='union',
    )
    return output_properties

calc_pyramid(xyzct, npyramid_add=0, pyramid_downsample=2, volumetric_resize=False)

Source code in src\image\util.py
def calc_pyramid(xyzct: tuple, npyramid_add: int = 0, pyramid_downsample: float = 2,
                 volumetric_resize: bool = False) -> list:
    x, y, z, c, t = xyzct
    if volumetric_resize and z > 1:
        size = (x, y, z)
    else:
        size = (x, y)
    sizes_add = []
    scale = 1
    for _ in range(npyramid_add):
        scale /= pyramid_downsample
        scaled_size = np.maximum(np.round(np.multiply(size, scale)).astype(int), 1)
        sizes_add.append(scaled_size)
    return sizes_add
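
Example: three extra pyramid levels for a 4096 x 3072 x 1 image, halving at each level (only x/y are scaled unless volumetric_resize is set and z > 1).

calc_pyramid((4096, 3072, 1, 1, 1), npyramid_add=3, pyramid_downsample=2)
# -> [array([2048, 1536]), array([1024, 768]), array([512, 384])]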

check_round_significants(a, significant_digits)

Source code in src\util.py
def check_round_significants(a: float, significant_digits: int) -> float:
    rounded = round_significants(a, significant_digits)
    if a != 0:
        dif = 1 - rounded / a
    else:
        dif = rounded - a
    if abs(dif) < 10 ** -significant_digits:
        return rounded
    return a

color_image(image)

Source code in src\image\util.py
def color_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 1:
        return cv.cvtColor(np.array(image), cv.COLOR_GRAY2RGB)
    else:
        return image

combine_transforms(transforms)

Source code in src\image\util.py
def combine_transforms(transforms):
    combined_transform = None
    for transform in transforms:
        if combined_transform is None:
            combined_transform = transform
        else:
            combined_transform = np.dot(transform, combined_transform)
    return combined_transform
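
Note the composition order: the first matrix in the list is applied first. A minimal sketch:

import numpy as np

translate = np.array([[1., 0., 10.],
                      [0., 1., 0.],
                      [0., 0., 1.]])
rotate90 = np.array([[0., -1., 0.],
                     [1., 0., 0.],
                     [0., 0., 1.]])
combined = combine_transforms([translate, rotate90])
# equivalent to rotate90 @ translate: the translation is applied before the rotation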

convert_image_sign_type(image, target_dtype)

Source code in src\image\util.py
def convert_image_sign_type(image: np.ndarray, target_dtype: np.dtype) -> np.ndarray:
    source_dtype = image.dtype
    if source_dtype.kind == target_dtype.kind:
        new_image = image
    elif source_dtype.kind == 'i':
        new_image = ensure_unsigned_image(image)
    else:
        # conversion without overhead
        offset = 2 ** (8 * target_dtype.itemsize - 1)
        new_image = (image - offset).astype(target_dtype)
    return new_image

convert_rational_value(value)

Source code in src\util.py
def convert_rational_value(value) -> float:
    if value is not None and isinstance(value, tuple):
        if value[0] == value[1]:
            value = value[0]
        else:
            value = value[0] / value[1]
    return value
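
Example: TIFF-style rational tuples collapse to a single number; other values pass through unchanged.

convert_rational_value((1, 4))   # -> 0.25
convert_rational_value((5, 5))   # -> 5
convert_rational_value(0.2)      # -> 0.2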

convert_to_um(value, unit)

Source code in src\util.py
def convert_to_um(value, unit):
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    return value * conversions.get(unit, 1)
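
Example: unknown units fall back to a conversion factor of 1.

convert_to_um(1.5, 'mm')   # -> 1500.0
convert_to_um(250, 'nm')   # -> 0.25
convert_to_um(3, 'px')     # -> 3 (unknown unit, unscaled)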

create_compression_filter(compression)

Source code in src\image\util.py
def create_compression_filter(compression: list) -> tuple:
    compressor, compression_filters = None, None
    compression = ensure_list(compression)
    if compression is not None and len(compression) > 0:
        compression_type = compression[0].lower()
        if len(compression) > 1:
            level = int(compression[1])
        else:
            level = None
        if 'lzw' in compression_type:
            from imagecodecs.numcodecs import Lzw
            compression_filters = [Lzw()]
        elif '2k' in compression_type or '2000' in compression_type:
            from imagecodecs.numcodecs import Jpeg2k
            compression_filters = [Jpeg2k(level=level)]
        elif 'jpegls' in compression_type:
            from imagecodecs.numcodecs import Jpegls
            compression_filters = [Jpegls(level=level)]
        elif 'jpegxr' in compression_type:
            from imagecodecs.numcodecs import Jpegxr
            compression_filters = [Jpegxr(level=level)]
        elif 'jpegxl' in compression_type:
            from imagecodecs.numcodecs import Jpegxl
            compression_filters = [Jpegxl(level=level)]
        else:
            compressor = compression
    return compressor, compression_filters

create_transform(center, angle, matrix_size=3)

Source code in src\util.py
def create_transform(center, angle, matrix_size=3):
    if isinstance(center, dict):
        center = dict_to_xyz(center)
    if len(center) == 2:
        center = np.array(list(center) + [0])
    if angle is None:
        angle = 0
    r = Rotation.from_euler('z', angle, degrees=True)
    t = center - r.apply(center, inverse=True)
    transform = np.eye(matrix_size)
    transform[:3, :3] = np.transpose(r.as_matrix())
    transform[:3, -1] += t
    return transform
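
Example (a sketch, assuming the module is importable as src.util): a 4x4 affine rotating 90 degrees about a fixed center; the center is a fixed point of the resulting transform, which can be checked with apply_transform.

from src.util import apply_transform, create_transform

transform = create_transform(center=(100, 50, 0), angle=90, matrix_size=4)
apply_transform([(100, 50, 0)], transform)[0]
# -> ~[100, 50, 0]: the center maps onto itself (up to float rounding)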

create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0))

Source code in src\util.py
def create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0)):
    transform = cv.getRotationMatrix2D(center[:2], angle, scale)
    transform[:, 2] += translate
    if len(transform) == 2:
        transform = np.vstack([transform, [0, 0, 1]])   # create 3x3 matrix
    return transform

desc_to_dict(desc)

Source code in src\util.py
def desc_to_dict(desc: str) -> dict:
    desc_dict = {}
    if desc.startswith('{'):
        try:
            metadata = ast.literal_eval(desc)
            return metadata
        except:
            pass
    for item in re.split(r'[\r\n\t|]', desc):
        item_sep = '='
        if ':' in item:
            item_sep = ':'
        if item_sep in item:
            items = item.split(item_sep)
            key = items[0].strip()
            value = items[1].strip()
            for dtype in (int, float):
                # note: bool(value) is True for any non-empty string, so only numeric coercion is attempted
                try:
                    value = dtype(value)
                    break
                except (ValueError, TypeError):
                    pass
            desc_dict[key] = value
    return desc_dict
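
Example: items are split on newlines, tabs, and '|'; both 'key=value' and 'key: value' forms are recognised, and numeric values are coerced.

desc_to_dict('width=512|height: 256.5')   # -> {'width': 512, 'height': 256.5}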

detect_area_points(image)

Source code in src\image\util.py
def detect_area_points(image):
    method = cv.THRESH_OTSU
    threshold = -5
    contours = []
    while len(contours) <= 1 and threshold <= 255:
        _, binimage = cv.threshold(np.array(uint8_image(image)), threshold, 255, method)
        contours0 = cv.findContours(binimage, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        contours = contours0[0] if len(contours0) == 2 else contours0[1]
        method = cv.THRESH_BINARY
        threshold += 5
    area_contours = [(contour, cv.contourArea(contour)) for contour in contours]
    area_contours.sort(key=lambda contour_area: contour_area[1], reverse=True)
    min_area = max(np.mean([area for contour, area in area_contours]), 1)
    area_points = [(get_center(contour), area) for contour, area in area_contours if area > min_area]

    #image = cv.cvtColor(image, cv.COLOR_GRAY2BGR)
    #for point in area_points:
    #    radius = int(np.round(np.sqrt(point[1]/np.pi)))
    #    cv.circle(image, tuple(np.round(point[0]).astype(int)), radius, (255, 0, 0), -1)
    #show_image(image)
    return area_points

dict_to_xyz(dct, keys='xyz')

Source code in src\util.py
def dict_to_xyz(dct, keys='xyz'):
    return [dct[key] for key in keys if key in dct]
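
Example: values are returned in xyz key order, skipping axes that are absent.

dict_to_xyz({'z': 5, 'x': 1, 'y': 2})   # -> [1, 2, 5]
dict_to_xyz({'x': 10, 'y': 20})         # -> [10, 20]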

dir_regex(pattern)

Source code in src\util.py
def dir_regex(pattern):
    files = []
    for pattern_item in ensure_list(pattern):
        files.extend(glob.glob(pattern_item, recursive=True))
    files_sorted = sorted(files, key=lambda file: find_all_numbers(get_filetitle(file)))
    return files_sorted

draw_edge_filter(bounds)

Source code in src\util.py
def draw_edge_filter(bounds):
    out_image = np.zeros(np.flip(bounds))
    y, x = np.where(out_image == 0)
    points = np.transpose([x, y])

    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) * 10, 0, 1)
    return position_weights.reshape(np.flip(bounds))

draw_keypoints(image, points, color=(255, 0, 0))

Source code in src\image\util.py
def draw_keypoints(image, points, color=(255, 0, 0)):
    out_image = color_image(float2int_image(image))
    for point in points:
        point = np.round(point).astype(int)
        cv.drawMarker(out_image, tuple(point), color=color, markerType=cv.MARKER_CROSS, markerSize=5, thickness=1)
    return out_image

draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None, points_color='black', match_color='red', inlier_color='lime', show_plot=True, output_filename=None)

Source code in src\image\util.py
def draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None,
                           points_color='black', match_color='red', inlier_color='lime',
                           show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape = np.max([image.shape for image in [image1, image2]], axis=0)
    shape_y, shape_x = shape[:2]
    if shape_x > 2 * shape_y:
        merge_axis = 0
        offset2 = [shape_y, 0]
    else:
        merge_axis = 1
        offset2 = [0, shape_x]
    image = np.concatenate([
        np.pad(image1, ((0, shape[0] - image1.shape[0]), (0, shape[1] - image1.shape[1]))),
        np.pad(image2, ((0, shape[0] - image2.shape[0]), (0, shape[1] - image2.shape[1])))
    ], axis=merge_axis)
    ax.imshow(image, cmap='gray')

    ax.scatter(
        points1[:, 1],
        points1[:, 0],
        facecolors='none',
        edgecolors=points_color,
    )
    ax.scatter(
        points2[:, 1] + offset2[1],
        points2[:, 0] + offset2[0],
        facecolors='none',
        edgecolors=points_color,
    )

    for i, match in enumerate(matches):
        color = match_color
        if i < len(inliers) and inliers[i]:
            color = inlier_color
        index1, index2 = match
        ax.plot(
            (points1[index1, 1], points2[index2, 1] + offset2[1]),
            (points1[index1, 0], points2[index2, 0] + offset2[0]),
            '-', linewidth=1, alpha=0.5, color=color,
        )

    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

    return fig, ax

draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None, color=(255, 0, 0), inlier_color=(0, 255, 0), radius=15, thickness=2)

Source code in src\image\util.py
def draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None,
                              color=(255, 0, 0), inlier_color=(0, 255, 0), radius = 15, thickness = 2):
    # based on https://gist.github.com/woolpeeker/d7e1821e1b5c556b32aafe10b7a1b7e8
    image1 = uint8_image(image1)
    image2 = uint8_image(image2)
    # We're drawing them side by side.  Get dimensions accordingly.
    new_shape = (max(image1.shape[0], image2.shape[0]), image1.shape[1] + image2.shape[1], 3)
    out_image = np.zeros(new_shape, image1.dtype)
    # Place images onto the new image.
    out_image[0:image1.shape[0], 0:image1.shape[1]] = color_image(image1)
    out_image[0:image2.shape[0], image1.shape[1]:image1.shape[1] + image2.shape[1]] = color_image(image2)

    if matches is not None:
        # Draw lines between matches.  Make sure to offset kp coords in second image appropriately.
        for index, match in enumerate(matches):
            if inliers is not None and inliers[index]:
                line_color = inlier_color
            else:
                line_color = color
            # So the keypoint locs are stored as a tuple of floats.  cv2.line() wants locs as a tuple of ints.
            end1 = tuple(np.round(points1[match[0]]).astype(int))
            end2 = tuple(np.round(points2[match[1]]).astype(int) + np.array([image1.shape[1], 0]))
            cv.line(out_image, end1, end2, line_color, thickness)
            cv.circle(out_image, end1, radius, line_color, thickness)
            cv.circle(out_image, end2, radius, line_color, thickness)
    else:
        # Draw all points if no matches are provided.
        for point in points1:
            point = tuple(np.round(point).astype(int))
            cv.circle(out_image, point, radius, color, thickness)
        for point in points2:
            point = tuple(np.round(point).astype(int) + np.array([image1.shape[1], 0]))
            cv.circle(out_image, point, radius, color, thickness)
    return out_image

draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None, show_plot=True, output_filename=None)

Source code in src\image\util.py
def draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None,
                              show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape_y, shape_x = image1.shape[:2]
    if shape_x > 2 * shape_y:
        alignment = 'vertical'
    else:
        alignment = 'horizontal'
    plot_matched_features(
        image1,
        image2,
        keypoints0=points1,
        keypoints1=points2,
        matches=matches,
        ax=ax,
        alignment=alignment,
        only_matches=True,
    )
    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

ensure_list(x)

Source code in src\util.py
def ensure_list(x) -> list:
    if x is None:
        return []
    elif isinstance(x, list):
        return x
    else:
        return [x]
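
Example:

ensure_list(None)     # -> []
ensure_list('abc')    # -> ['abc']
ensure_list([1, 2])   # -> [1, 2]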

ensure_unsigned_image(image)

Source code in src\image\util.py
def ensure_unsigned_image(image: np.ndarray) -> np.ndarray:
    source_dtype = image.dtype
    dtype = ensure_unsigned_type(source_dtype)
    if dtype != source_dtype:
        # conversion without overhead
        offset = 2 ** (8 * dtype.itemsize - 1)
        new_image = image.astype(dtype) + offset
    else:
        new_image = image
    return new_image

ensure_unsigned_type(dtype)

Source code in src\image\util.py
def ensure_unsigned_type(dtype: np.dtype) -> np.dtype:
    new_dtype = dtype
    if dtype.kind == 'i' or dtype.byteorder == '>' or dtype.byteorder == '<':
        new_dtype = np.dtype(f'u{dtype.itemsize}')
    return new_dtype

eval_context(data, key, default_value, context)

Source code in src\util.py
def eval_context(data, key, default_value, context):
    value = data.get(key, default_value)
    if isinstance(value, str):
        try:
            value = value.format_map(context)
        except:
            pass
        try:
            value = eval(value, context)
        except:
            pass
    return value
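
Example (a sketch): the looked-up value is first formatted with the context, then evaluated as a Python expression; if either step fails, the value is returned as-is.

eval_context({'size': '{width} * 2'}, 'size', 1, {'width': 256})   # -> 512
eval_context({}, 'size', 1, {'width': 256})                        # -> 1 (default)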

export_csv(filename, data, header=None)

Source code in src\util.py
def export_csv(filename, data, header=None):
    with open(filename, 'w', encoding='utf8', newline='') as file:
        csvwriter = csv.writer(file)
        if header is not None:
            csvwriter.writerow(header)
        for row in data:
            csvwriter.writerow(row)

export_json(filename, data)

Source code in src\util.py
def export_json(filename, data):
    with open(filename, 'w', encoding='utf8') as file:
        json.dump(data, file, indent=4)

filter_dict(dict0)

Source code in src\util.py
def filter_dict(dict0: dict) -> dict:
    new_dict = {}
    for key, value0 in dict0.items():
        if value0 is not None:
            values = []
            for value in ensure_list(value0):
                if isinstance(value, dict):
                    value = filter_dict(value)
                values.append(value)
            if len(values) == 1:
                values = values[0]
            new_dict[key] = values
    return new_dict
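
Example: None values are dropped recursively, and single-element lists are collapsed to their sole value.

filter_dict({'a': 1, 'b': None, 'c': [{'d': 2, 'e': None}]})
# -> {'a': 1, 'c': {'d': 2}}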

filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5)

Source code in src\util.py
def filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5):
    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) / filter_factor, 0, 1)
    order_weights = 1 - np.array(range(len(points))) / len(points) / 2
    weights = position_weights * order_weights
    return weights > threshold

filter_noise_images(images)

Source code in src\image\util.py
def filter_noise_images(images):
    dtype = images[0].dtype
    maxval = 2 ** (8 * dtype.itemsize) - 1
    image_vars = [np.asarray(np.std(image)).item() for image in images]
    threshold, mask0 = cv.threshold(np.array(image_vars).astype(dtype), 0, maxval, cv.THRESH_OTSU)
    mask = [flag.item() for flag in mask0.astype(bool)]
    return int(threshold), mask

find_all_numbers(text)

Source code in src\util.py
def find_all_numbers(text: str) -> list:
    return list(map(int, re.findall(r'\d+', text)))

float2int_image(image, target_dtype=np.dtype(np.uint8))

Source code in src\image\util.py
def float2int_image(image, target_dtype=np.dtype(np.uint8)):
    source_dtype = image.dtype
    if source_dtype.kind not in ('i', 'u') and not target_dtype.kind == 'f':
        maxval = 2 ** (8 * target_dtype.itemsize) - 1
        return (image * maxval).astype(target_dtype)
    else:
        return image

get_center(data, offset=(0, 0))

Source code in src\util.py
def get_center(data, offset=(0, 0)):
    moments = get_moments(data, offset=offset)
    if moments['m00'] != 0:
        center = get_moments_center(moments)
    else:
        center = np.mean(data, 0).flatten()  # close approximation
    return center.astype(np.float32)

get_center_from_transform(transform)

Source code in src\util.py
def get_center_from_transform(transform):
    # from opencv:
    # t0 = (1-alpha) * cx - beta * cy
    # t1 = beta * cx + (1-alpha) * cy
    # where
    # alpha = cos(angle) * scale
    # beta = sin(angle) * scale
    # isolate cx and cy:
    t0, t1 = transform[:2, 2]
    scale = 1
    angle = np.arctan2(transform[0][1], transform[0][0])
    alpha = np.cos(angle) * scale
    beta = np.sin(angle) * scale
    cx = (t1 + t0 * (1 - alpha) / beta) / (beta + (1 - alpha) ** 2 / beta)
    cy = ((1 - alpha) * cx - t0) / beta
    return cx, cy

get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None)

Source code in src\image\util.py
def get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None):
    if rotation is None:
        rotation = 0

    if isinstance(data, DataTree):
        sim = msi_utils.get_sim_from_msim(data)
    else:
        sim = data
    sdims = ''.join(si_utils.get_spatial_dims_from_sim(sim))
    sdims = sdims.replace('zyx', 'xyz').replace('yx', 'xy')   # order xy(z)
    origin = si_utils.get_origin_from_sim(sim)
    translation = [origin[sdim] for sdim in sdims]

    if len(translation) == 0:
        translation = [0, 0]
    if len(translation) == 2:
        if translation0 is not None and len(translation0) == 3:
            z = translation0[2]
        else:
            z = 0
        translation = list(translation) + [z]

    if transform is not None:
        translation1, rotation1, _ = get_properties_from_transform(transform, invert=True)
        translation = np.array(translation) + translation1
        rotation += rotation1

    if transform_key is not None:
        transform1 = sim.transforms[transform_key]
        translation1, rotation1, _ = get_properties_from_transform(transform1, invert=True)
        rotation += rotation1

    return translation, rotation

get_default(x, default)

Source code in src\util.py
def get_default(x, default):
    return default if x is None else x

get_filetitle(filename)

Source code in src\util.py
def get_filetitle(filename: str) -> str:
    filebase = os.path.basename(filename)
    title = os.path.splitext(filebase)[0].rstrip('.ome')
    return title

get_image_quantile(image, quantile, axis=None)

Source code in src\image\util.py
def get_image_quantile(image: np.ndarray, quantile: float, axis=None) -> float:
    value = np.quantile(image, quantile, axis=axis).astype(image.dtype)
    return value

get_image_size_info(sizes_xyzct, pixel_nbytes, pixel_type, channels)

Source code in src\image\util.py
def get_image_size_info(sizes_xyzct: list, pixel_nbytes: int, pixel_type: np.dtype, channels: list) -> str:
    image_size_info = 'XYZCT:'
    size = 0
    for i, size_xyzct in enumerate(sizes_xyzct):
        w, h, zs, cs, ts = size_xyzct
        size += np.int64(pixel_nbytes) * w * h * zs * cs * ts
        if i > 0:
            image_size_info += ','
        image_size_info += f' {w} {h} {zs} {cs} {ts}'
    image_size_info += f' Pixel type: {pixel_type} Uncompressed: {print_hbytes(size)}'
    if sizes_xyzct[0][3] == 3:
        channel_info = 'rgb'
    else:
        channel_info = ','.join([channel.get('Name', '') for channel in channels])
    if channel_info != '':
        image_size_info += f' Channels: {channel_info}'
    return image_size_info

get_image_window(image, low=0.01, high=0.99)

Source code in src\image\util.py
def get_image_window(image, low=0.01, high=0.99):
    window = (
        get_image_quantile(image, low),
        get_image_quantile(image, high)
    )
    return window

get_max_downsamples(shape, npyramid_add, pyramid_downsample)

Source code in src\image\util.py
def get_max_downsamples(shape, npyramid_add, pyramid_downsample):
    shape = list(shape)
    for i in range(npyramid_add):
        shape[-1] //= pyramid_downsample
        shape[-2] //= pyramid_downsample
        if shape[-1] < 1 or shape[-2] < 1:
            return i
    return npyramid_add
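
Example: the requested number of extra levels is clamped once the x/y dimensions would drop below one pixel.

get_max_downsamples((3, 512, 512), npyramid_add=5, pyramid_downsample=2)    # -> 5
get_max_downsamples((3, 512, 512), npyramid_add=12, pyramid_downsample=2)   # -> 9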

get_mean_nn_distance(points1, points2)

Source code in src\util.py
def get_mean_nn_distance(points1, points2):
    return np.mean([get_nn_distance(points1), get_nn_distance(points2)])

get_moments(data, offset=(0, 0))

Source code in src\util.py
def get_moments(data, offset=(0, 0)):
    moments = cv.moments((np.array(data) + offset).astype(np.float32))    # doesn't work for float64!
    return moments

get_moments_center(moments, offset=(0, 0))

Source code in src\util.py
def get_moments_center(moments, offset=(0, 0)):
    return np.array([moments['m10'], moments['m01']]) / moments['m00'] + np.array(offset)

get_nn_distance(points0)

Source code in src\util.py
def get_nn_distance(points0):
    points = list(set(map(tuple, points0)))     # get unique points
    if len(points) >= 2:
        tree = KDTree(points, leaf_size=2)
        dist, ind = tree.query(points, k=2)
        nn_distance = np.median(dist[:, 1])
    else:
        nn_distance = 1
    return nn_distance

get_numpy_slicing(dimension_order, **slicing)

Source code in src\image\util.py
def get_numpy_slicing(dimension_order, **slicing):
    slices = []
    for axis in dimension_order:
        index = slicing.get(axis)
        index0 = slicing.get(axis + '0')
        index1 = slicing.get(axis + '1')
        if index0 is not None and index1 is not None:
            slice1 = slice(int(index0), int(index1))
        elif index is not None:
            slice1 = int(index)
        else:
            slice1 = slice(None)
        slices.append(slice1)
    return tuple(slices)
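
Example (a sketch): single indices use the axis name, ranges use the axis name with 0/1 suffixes.

slices = get_numpy_slicing('tzyx', t=0, z0=2, z1=5)
# -> (0, slice(2, 5), slice(None, None, None), slice(None, None, None))
# given an array with axes tzyx, data[slices] selects t=0 and z in [2, 5)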

get_orthogonal_pairs(origins, image_size_um)

Get pairs of orthogonal neighbors from a list of tiles. Tiles don't have to be placed on a regular grid.

Source code in src\util.py
def get_orthogonal_pairs(origins, image_size_um):
    """
    Get pairs of orthogonal neighbors from a list of tiles.
    Tiles don't have to be placed on a regular grid.
    """
    pairs = []
    angles = []
    z_positions = [pos[0] for pos in origins if len(pos) == 3]
    ordered_z = sorted(set(z_positions))
    is_mixed_3dstack = len(ordered_z) < len(z_positions)
    for i, j in np.transpose(np.triu_indices(len(origins), 1)):
        origini = np.array(origins[i])
        originj = np.array(origins[j])
        if is_mixed_3dstack:
            # ignore z value for distance
            distance = math.dist(origini[-2:], originj[-2:])
            min_distance = max(image_size_um[-2:])
            z_i, z_j = origini[0], originj[0]
            is_same_z = (z_i == z_j)
            is_close_z = abs(ordered_z.index(z_i) - ordered_z.index(z_j)) <= 1
            if not is_same_z:
                # for tiles in different z stack, require greater overlap
                min_distance *= 0.8
            ok = (distance < min_distance and is_close_z)
        else:
            distance = math.dist(origini, originj)
            min_distance = max(image_size_um)
            ok = (distance < min_distance)
        if ok:
            pairs.append((i, j))
            vector = origini - originj
            angle = math.degrees(math.atan2(vector[1], vector[0]))
            if distance < min(image_size_um):
                angle += 90
            while angle < -90:
                angle += 180
            while angle > 90:
                angle -= 180
            angles.append(angle)
    return pairs, angles
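
Example (a sketch): four 2D tiles on a 100 µm grid with ~110 µm tiles; only orthogonal neighbours are close enough to pair, diagonals are excluded.

origins = [(0, 0), (100, 0), (0, 100), (100, 100)]   # tile origins in µm
pairs, angles = get_orthogonal_pairs(origins, image_size_um=(110, 110))
# pairs -> [(0, 1), (0, 2), (1, 3), (2, 3)]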

get_properties_from_transform(transform, invert=False)

Source code in src\image\util.py
def get_properties_from_transform(transform, invert=False):
    if len(transform.shape) == 3:
        transform = transform[0]
    if invert:
        transform = param_utils.invert_coordinate_order(transform)
    transform = np.array(transform)
    translation = param_utils.translation_from_affine(transform)
    if len(translation) == 2:
        translation = list(translation) + [0]
    rotation = get_rotation_from_transform(transform)
    scale = get_scale_from_transform(transform)
    return translation, rotation, scale

get_rotation_from_transform(transform)

Source code in src\util.py
def get_rotation_from_transform(transform):
    rotation = np.rad2deg(np.arctan2(transform[0][1], transform[0][0]))
    return rotation

get_scale_from_transform(transform)

Source code in src\util.py
def get_scale_from_transform(transform):
    scale = np.mean(np.linalg.norm(transform, axis=0)[:-1])
    return scale

get_sim_physical_size(sim, invert=False)

Source code in src\image\util.py
def get_sim_physical_size(sim, invert=False):
    size = si_utils.get_shape_from_sim(sim, asarray=True) * si_utils.get_spacing_from_sim(sim, asarray=True)
    if invert:
        size = np.flip(size)
    return size

get_sim_position_final(sim)

Source code in src\image\util.py
def get_sim_position_final(sim):
    transform_keys = si_utils.get_tranform_keys_from_sim(sim)
    transform = combine_transforms([np.array(si_utils.get_affine_from_sim(sim, transform_key))
                                    for transform_key in transform_keys])
    position = apply_transform([si_utils.get_origin_from_sim(sim, asarray=True)], transform)[0]
    return position

get_sim_shape_2d(sim, transform_key=None)

Source code in src\image\util.py
def get_sim_shape_2d(sim, transform_key=None):
    if 't' in sim.coords.xindexes:
        # work-around for points error in get_overlap_bboxes()
        sim1 = si_utils.sim_sel_coords(sim, {'t': 0})
    else:
        sim1 = sim
    stack_props = si_utils.get_stack_properties_from_sim(sim1, transform_key=transform_key)
    vertices = mv_graph.get_vertices_from_stack_props(stack_props)
    if vertices.shape[1] == 3:
        # remove z coordinate
        vertices = vertices[:, 1:]
    if len(vertices) >= 8:
        # remove redundant x/y vertices
        vertices = vertices[:4]
    if len(vertices) >= 4:
        # last 2 vertices appear to be swapped
        vertices[2:] = np.array(list(reversed(vertices[2:])))
    return vertices

get_translation_from_transform(transform)

Source code in src\util.py
def get_translation_from_transform(transform):
    ndim = len(transform) - 1
    #translation = transform[:ndim, ndim]
    translation = apply_transform([[0] * ndim], transform)[0]
    return translation

get_unique_file_labels(filenames)

Source code in src\util.py
def get_unique_file_labels(filenames: list) -> list:
    file_labels = []
    file_parts = []
    label_indices = set()
    last_parts = None
    for filename in filenames:
        parts = split_numeric(filename)
        if len(parts) == 0:
            parts = filename
        file_parts.append(parts)
        if last_parts is not None:
            for parti, (part1, part2) in enumerate(zip(last_parts, parts)):
                if part1 != part2:
                    label_indices.add(parti)
        last_parts = parts
    label_indices = sorted(list(label_indices))

    for file_part in file_parts:
        file_label = '_'.join([file_part[i] for i in label_indices])
        file_labels.append(file_label)

    if len(set(file_labels)) < len(file_labels):
        # fallback for duplicate labels
        file_labels = [get_filetitle(filename) for filename in filenames]

    return file_labels

get_value_units_micrometer(value_units0)

Source code in src\util.py
def get_value_units_micrometer(value_units0: list|dict) -> list|dict|None:
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    if value_units0 is None:
        return None

    if isinstance(value_units0, dict):
        values_um = {}
        for dim, value_unit in value_units0.items():
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um[dim] = value_um
    else:
        values_um = []
        for value_unit in value_units0:
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um.append(value_um)
    return values_um
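
Example: (value, unit) pairs are converted to micrometers; bare numbers are assumed to already be in micrometers.

get_value_units_micrometer([(0.65, 'mm'), 2.5])           # -> [650.0, 2.5]
get_value_units_micrometer({'x': (100, 'nm'), 'y': 0.1})  # -> {'x': 0.1, 'y': 0.1}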

grayscale_image(image)

Source code in src\image\util.py
def grayscale_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 4:
        return cv.cvtColor(image, cv.COLOR_RGBA2GRAY)
    elif nchannels > 1:
        return cv.cvtColor(image, cv.COLOR_RGB2GRAY)
    else:
        return image

group_sims_by_z(sims)

Source code in src\image\util.py
def group_sims_by_z(sims):
    grouped_sims = []
    z_positions = [si_utils.get_origin_from_sim(sim).get('z') for sim in sims]
    is_mixed_3dstack = len(set(z_positions)) < len(z_positions)
    if is_mixed_3dstack:
        sims_by_z = {}
        for simi, z_pos in enumerate(z_positions):
            if z_pos is not None and z_pos not in sims_by_z:
                sims_by_z[z_pos] = []
            sims_by_z[z_pos].append(simi)
        grouped_sims = list(sims_by_z.values())
    if len(grouped_sims) == 0:
        grouped_sims = [list(range(len(sims)))]
    return grouped_sims

image_reshape(image, target_size)

Source code in src\image\util.py
def image_reshape(image: np.ndarray, target_size: tuple) -> np.ndarray:
    tw, th = target_size
    sh, sw = image.shape[0:2]
    if sw < tw or sh < th:
        dw = max(tw - sw, 0)
        dh = max(th - sh, 0)
        padding = [(dh // 2, dh - dh // 2), (dw // 2, dw - dw // 2)]
        if len(image.shape) == 3:
            padding += [(0, 0)]
        image = np.pad(image, padding, mode='constant', constant_values=(0, 0))
    if tw < sw or th < sh:
        image = image[0:th, 0:tw]
    return image
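
Example: smaller images are zero-padded symmetrically, larger ones are cropped from the top-left; target_size is (width, height).

import numpy as np
image = np.ones((100, 80), dtype=np.uint8)   # (height, width)
image_reshape(image, (120, 120)).shape       # -> (120, 120), zero-padded
image_reshape(image, (50, 50)).shape         # -> (50, 50), cropped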

image_resize(image, target_size0, dimension_order='yxc')

Source code in src\image\util.py
def image_resize(image: np.ndarray, target_size0: tuple, dimension_order: str = 'yxc') -> np.ndarray:
    shape = image.shape
    x_index = dimension_order.index('x')
    y_index = dimension_order.index('y')
    c_is_at_end = ('c' in dimension_order and dimension_order.endswith('c'))
    size = shape[x_index], shape[y_index]
    if np.mean(np.divide(size, target_size0)) < 1:
        interpolation = cv.INTER_CUBIC
    else:
        interpolation = cv.INTER_AREA
    dtype0 = image.dtype
    image = ensure_unsigned_image(image)
    target_size = tuple(np.maximum(np.round(target_size0).astype(int), 1))
    if dimension_order in ['yxc', 'yx']:
        new_image = cv.resize(np.asarray(image), target_size, interpolation=interpolation)
    elif dimension_order == 'cyx':
        new_image = np.moveaxis(image, 0, -1)
        new_image = cv.resize(np.asarray(new_image), target_size, interpolation=interpolation)
        new_image = np.moveaxis(new_image, -1, 0)
    else:
        ts = image.shape[dimension_order.index('t')] if 't' in dimension_order else 1
        zs = image.shape[dimension_order.index('z')] if 'z' in dimension_order else 1
        target_shape = list(image.shape).copy()
        target_shape[x_index] = target_size[0]
        target_shape[y_index] = target_size[1]
        new_image = np.zeros(target_shape, dtype=image.dtype)
        for t in range(ts):
            for z in range(zs):
                slices = get_numpy_slicing(dimension_order, z=z, t=t)
                image1 = image[slices]
                if not c_is_at_end:
                    image1 = np.moveaxis(image1, 0, -1)
                new_image1 = np.atleast_3d(cv.resize(np.asarray(image1), target_size, interpolation=interpolation))
                if not c_is_at_end:
                    new_image1 = np.moveaxis(new_image1, -1, 0)
                new_image[slices] = new_image1
    new_image = convert_image_sign_type(new_image, dtype0)
    return new_image

import_csv(filename)

Source code in src\util.py
def import_csv(filename):
    with open(filename, encoding='utf8', newline='') as file:
        data = list(csv.reader(file))   # materialise rows before the file closes
    return data

import_json(filename)

Source code in src\util.py
def import_json(filename):
    with open(filename, encoding='utf8') as file:
        data = json.load(file)
    return data

import_metadata(content, fields=None, input_path=None)

Source code in src\util.py
def import_metadata(content, fields=None, input_path=None):
    # return dict[id] = {values}
    if isinstance(content, str):
        ext = os.path.splitext(content)[1].lower()
        if input_path:
            if isinstance(input_path, list):
                input_path = input_path[0]
            content = os.path.normpath(os.path.join(os.path.dirname(input_path), content))
        if ext == '.csv':
            content = import_csv(content)
        elif ext in ['.json', '.ome.json']:
            content = import_json(content)
    if fields is not None:
        content = [[data[field] for field in fields] for data in content]
    return content

int2float_image(image)

Source code in src\image\util.py
def int2float_image(image):
    source_dtype = image.dtype
    if not source_dtype.kind == 'f':
        maxval = 2 ** (8 * source_dtype.itemsize) - 1
        return image / np.float32(maxval)
    else:
        return image

norm_image_quantiles(image0, quantile=0.99)

Source code in src\image\util.py
def norm_image_quantiles(image0, quantile=0.99):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    min_value = np.quantile(image, 1 - quantile)
    max_value = np.quantile(image, quantile)
    normimage = (image - np.mean(image)) / (max_value - min_value)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

norm_image_variance(image0)

Source code in src\image\util.py
def norm_image_variance(image0):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    normimage = (image - np.mean(image)) / np.std(image)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

normalise(sims, transform_key, use_global=True)

Source code in src\image\util.py
def normalise(sims, transform_key, use_global=True):
    new_sims = []
    dtype = sims[0].dtype
    # global mean and stddev
    if use_global:
        mins = []
        ranges = []
        for sim in sims:
            min = np.mean(sim, dtype=np.float32)
            range = np.std(sim, dtype=np.float32)
            #min, max = get_image_window(sim, low=0.01, high=0.99)
            #range = max - min
            mins.append(min)
            ranges.append(range)
        min = np.mean(mins)
        range = np.mean(ranges)
    else:
        min = 0
        range = 1
    # normalise all images
    for sim in sims:
        if not use_global:
            min = np.mean(sim, dtype=np.float32)
            range = np.std(sim, dtype=np.float32)
        image = (sim - min) / range
        image = float2int_image(image.clip(0, 1), dtype)    # np.clip(image) is not dask-compatible, use image.clip() instead
        new_sim = si_utils.get_sim_from_array(
            image,
            dims=sim.dims,
            scale=si_utils.get_spacing_from_sim(sim),
            translation=si_utils.get_origin_from_sim(sim),
            transform_key=transform_key,
            affine=si_utils.get_affine_from_sim(sim, transform_key),
            c_coords=sim.c.data,
            t_coords=sim.t.data
        )
        new_sims.append(new_sim)
    return new_sims

normalise_rotated_positions(positions0, rotations0, size, center)

Source code in src\util.py
def normalise_rotated_positions(positions0, rotations0, size, center):
    # in [xy(z)]
    positions = []
    rotations = []
    _, angles = get_orthogonal_pairs(positions0, size)
    for position0, rotation in zip(positions0, rotations0):
        if rotation is None and len(angles) > 0:
            rotation = -np.mean(angles)
        angle = -rotation if rotation is not None else None
        transform = create_transform(center=center, angle=angle, matrix_size=4)
        position = apply_transform([position0], transform)[0]
        positions.append(position)
        rotations.append(rotation)
    return positions, rotations

normalise_rotation(rotation)

Normalise rotation to be in the range [-180, 180].

Source code in src\util.py
def normalise_rotation(rotation):
    """
    Normalise rotation to be in the range [-180, 180].
    """
    while rotation < -180:
        rotation += 360
    while rotation > 180:
        rotation -= 360
    return rotation
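
Illustrative usage (added example, not from the source):

normalise_rotation(270)     # -> -90
normalise_rotation(-450)    # -> -90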

normalise_values(image, min_value, max_value)

Source code in src\image\util.py
def normalise_values(image: np.ndarray, min_value: float, max_value: float) -> np.ndarray:
    image = (image.astype(np.float32) - min_value) / (max_value - min_value)
    return image.clip(0, 1)

pilmode_to_pixelinfo(mode)

Source code in src\image\util.py
def pilmode_to_pixelinfo(mode: str) -> tuple:
    pixelinfo = (np.uint8, 8, 1)
    mode_types = {
        'I': (np.uint32, 32, 1),
        'F': (np.float32, 32, 1),
        'RGB': (np.uint8, 24, 3),
        'RGBA': (np.uint8, 32, 4),
        'CMYK': (np.uint8, 32, 4),
        'YCbCr': (np.uint8, 24, 3),
        'LAB': (np.uint8, 24, 3),
        'HSV': (np.uint8, 24, 3),
    }
    if '16' in mode:
        pixelinfo = (np.uint16, 16, 1)
    elif '32' in mode:
        pixelinfo = (np.uint32, 32, 1)
    elif mode in mode_types:
        pixelinfo = mode_types[mode]
    pixelinfo = (np.dtype(pixelinfo[0]), pixelinfo[1])
    return pixelinfo
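
Illustrative usage (added example, not from the source); the returned tuple is (numpy dtype, bits per pixel):

pilmode_to_pixelinfo('RGB')     # -> (dtype('uint8'), 24)
pilmode_to_pixelinfo('I;16')    # -> (dtype('uint16'), 16)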

points_to_3d(points)

Source code in src\util.py
def points_to_3d(points):
    return [list(point) + [0] for point in points]

precise_resize(image, factors)

Source code in src\image\util.py
def precise_resize(image: np.ndarray, factors) -> np.ndarray:
    if image.ndim > len(factors):
        factors = list(factors) + [1]
    new_image = downscale_local_mean(np.asarray(image), tuple(factors)).astype(image.dtype)
    return new_image

print_dict(dct, indent=0)

Source code in src\util.py
def print_dict(dct: dict, indent: int = 0) -> str:
    s = ''
    if isinstance(dct, dict):
        for key, value in dct.items():
            s += '\n'
            if not isinstance(value, list):
                s += '\t' * indent + str(key) + ': '
            if isinstance(value, dict):
                s += print_dict(value, indent=indent + 1)
            elif isinstance(value, list):
                for v in value:
                    s += print_dict(v)
            else:
                s += str(value)
    else:
        s += str(dct)
    return s

print_hbytes(nbytes)

Source code in src\util.py
def print_hbytes(nbytes: int) -> str:
    exps = ['', 'K', 'M', 'G', 'T', 'P', 'E']
    div = 1024
    exp = 0

    while nbytes > div:
        nbytes /= div
        exp += 1
    if exp < len(exps):
        e = exps[exp]
    else:
        e = f'e{exp * 3}'
    return f'{nbytes:.1f}{e}B'
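
Illustrative usage (added example, not from the source):

print_hbytes(1536)           # -> '1.5KB'
print_hbytes(3 * 1024 ** 3)  # -> '3.0GB'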

redimension_data(data, old_order, new_order, **indices)

Source code in src\image\util.py
def redimension_data(data, old_order, new_order, **indices):
    # able to provide optional dimension values e.g. t=0, z=0
    if new_order == old_order:
        return data

    new_data = data
    order = old_order
    # remove
    for o in old_order:
        if o not in new_order:
            index = order.index(o)
            dim_value = indices.get(o, 0)
            new_data = np.take(new_data, indices=dim_value, axis=index)
            order = order[:index] + order[index + 1:]
    # add
    for o in new_order:
        if o not in order:
            new_data = np.expand_dims(new_data, 0)
            order = o + order
    # move
    old_indices = [order.index(o) for o in new_order]
    new_indices = list(range(len(new_order)))
    new_data = np.moveaxis(new_data, old_indices, new_indices)
    return new_data
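
Illustrative usage (added example, not from the source): expanding 2D 'yx' data to the 5D 'tczyx' order, and collapsing back with explicit indices for the removed dimensions.

import numpy as np
redimension_data(np.zeros((4, 5)), 'yx', 'tczyx').shape                    # -> (1, 1, 1, 4, 5)
redimension_data(np.zeros((2, 3, 4, 5)), 'tzyx', 'yx', t=0, z=1).shape    # -> (4, 5)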

reorder(items, old_order, new_order, default_value=0)

Source code in src\util.py
def reorder(items: list, old_order: str, new_order: str, default_value: int = 0) -> list:
    new_items = []
    for label in new_order:
        if label in old_order:
            item = items[old_order.index(label)]
        else:
            item = default_value
        new_items.append(item)
    return new_items
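
Illustrative usage (added example, not from the source): missing labels get the default value.

reorder([512, 256], 'yx', 'xyz')    # -> [256, 512, 0]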

resize_image(image, new_size)

Source code in src\image\util.py
def resize_image(image, new_size):
    if not isinstance(new_size, (tuple, list, np.ndarray)):
        # use single value for width; apply aspect ratio
        size = np.flip(image.shape[:2])
        new_size = new_size, new_size * size[1] // size[0]
    return cv.resize(image, new_size)

retuple(chunks, shape)

Expand chunks to match shape.

E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028) return (3, 4, 5, 64, 64)

If chunks is an integer, it is applied to all dimensions, to match the behaviour of zarr-python.

Source code in src\util.py
def retuple(chunks, shape):
    # from ome-zarr-py
    """
    Expand chunks to match shape.

    E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028)
    return (3, 4, 5, 64, 64)

    If chunks is an integer, it is applied to all dimensions, to match
    the behaviour of zarr-python.
    """

    if isinstance(chunks, int):
        return tuple([chunks] * len(shape))

    dims_to_add = len(shape) - len(chunks)
    return *shape[:dims_to_add], *chunks
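
Illustrative usage (added example, not from the source):

retuple((64, 64), (3, 4, 5, 1028, 1028))    # -> (3, 4, 5, 64, 64)
retuple(64, (2, 512, 512))                  # -> (64, 64, 64)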

round_significants(a, significant_digits)

Source code in src\util.py
def round_significants(a: float, significant_digits: int) -> float:
    if a != 0:
        round_decimals = significant_digits - int(np.floor(np.log10(abs(a)))) - 1
        return round(a, round_decimals)
    return a
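
Illustrative usage (added example, not from the source):

round_significants(123.456, 2)       # -> 120.0
round_significants(0.00123456, 3)    # -> 0.00123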

show_image(image, title='', cmap=None)

Source code in src\image\util.py
def show_image(image, title='', cmap=None):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if cmap is None:
        cmap = 'gray' if nchannels == 1 else None
    plt.imshow(image, cmap=cmap)
    if title != '':
        plt.title(title)
    plt.show()

split_num_text(text)

Source code in src\util.py
def split_num_text(text: str) -> list:
    num_texts = []
    block = ''
    is_num0 = None
    if text is None:
        return None

    for c in text:
        is_num = (c.isnumeric() or c == '.')
        if is_num0 is not None and is_num != is_num0:
            num_texts.append(block)
            block = ''
        block += c
        is_num0 = is_num
    if block != '':
        num_texts.append(block)

    num_texts2 = []
    for block in num_texts:
        block = block.strip()
        try:
            block = float(block)
        except ValueError:  # non-numeric blocks stay as text
            pass
        if block not in ['', ' ', ',', '|']:  # stripped separator blocks become '' and are dropped
            num_texts2.append(block)
    return num_texts2
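
Illustrative usage (added example, not from the source): numeric runs become floats, the rest stays text.

split_num_text('2.5um')    # -> [2.5, 'um']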

split_numeric(text)

Source code in src\util.py
def split_numeric(text: str) -> list:
    num_parts = []
    parts = re.split(r'[_/\\.]', text)
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            num_parts.append(part)
    return num_parts

split_numeric_dict(text)

Source code in src\util.py
def split_numeric_dict(text: str) -> dict:
    num_parts = {}
    parts = re.split(r'[_/\\.]', text)
    parti = 0
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            index = num_span.start()
            label = part[:index]
            if label == '':
                label = parti
            num_parts[label] = num_span.group()
            parti += 1
    return num_parts
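
Illustrative usage (added example, not from the source): the text before each numeric run becomes its key.

split_numeric_dict('tile_x01_y02.ome.tiff')    # -> {'x': '01', 'y': '02'}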

split_path(path)

Source code in src\util.py
def split_path(path: str) -> list:
    return os.path.normpath(path).split(os.path.sep)

split_value_unit_list(text)

Source code in src\util.py
def split_value_unit_list(text: str) -> list:
    value_units = []
    if text is None:
        return None

    items = split_num_text(text)
    if isinstance(items[-1], str):
        def_unit = items[-1]
    else:
        def_unit = ''

    i = 0
    while i < len(items):
        value = items[i]
        if i + 1 < len(items):
            unit = items[i + 1]
        else:
            unit = ''
        if not isinstance(value, str):
            if isinstance(unit, str):
                i += 1
            else:
                unit = def_unit
            value_units.append((value, unit))
        i += 1
    return value_units
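
Illustrative usage (added example, not from the source):

split_value_unit_list('0.2um 0.3um')    # -> [(0.2, 'um'), (0.3, 'um')]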

uint8_image(image)

Source code in src\image\util.py
def uint8_image(image):
    source_dtype = image.dtype
    if source_dtype.kind == 'f':
        image = image * 255
    elif source_dtype.itemsize != 1:
        factor = 2 ** (8 * (source_dtype.itemsize - 1))
        image = image // factor
    return image.astype(np.uint8)
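
Illustrative usage (added example, not from the source): floats are assumed to be in [0, 1]; wider integer types are scaled down.

import numpy as np
uint8_image(np.array([0.0, 0.5, 1.0]))            # -> [0, 127, 255]
uint8_image(np.array([65535], dtype=np.uint16))   # -> [255]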

validate_transform(transform, max_rotation=None)

Source code in src\util.py
def validate_transform(transform, max_rotation=None):
    if transform is None:
        return False
    transform = np.array(transform)
    if np.any(np.isnan(transform)):
        return False
    if np.any(np.isinf(transform)):
        return False
    if np.linalg.det(transform) == 0:
        return False
    if max_rotation is not None and abs(normalise_rotation(get_rotation_from_transform(transform))) > max_rotation:
        return False
    return True
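
Illustrative usage (added example, not from the source):

import numpy as np
validate_transform(np.eye(3))           # -> True
validate_transform(np.zeros((3, 3)))    # -> False (singular)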

xyz_to_dict(xyz, axes='xyz')

Source code in src\util.py
def xyz_to_dict(xyz, axes='xyz'):
    dct = {dim: value for dim, value in zip(axes, xyz)}
    return dct

MVSRegistrationNapari

MVSRegistrationNapari

Bases: MVSRegistration, QObject

Source code in src\MVSRegistrationNapari.py
class MVSRegistrationNapari(MVSRegistration, QObject):
    update_napari_signal = Signal(str, list, list)

    def __init__(self, params_general, viewer):
        super().__init__(params_general)
        self.viewer = viewer
        self.update_napari_signal.connect(self.update_napari)

    @Slot(str, list, list)
    def update_napari(self, layer_name, shapes, labels):
        if len(shapes) > 0:
            text = {'string': '{labels}'}
            features = {'labels': labels}
            self.viewer.add_shapes(shapes, name=layer_name, text=text, features=features, opacity=0.5)
            self.viewer.show()

update_napari_signal = Signal(str, list, list) class-attribute instance-attribute

viewer = viewer instance-attribute

__init__(params_general, viewer)

Source code in src\MVSRegistrationNapari.py
def __init__(self, params_general, viewer):
    super().__init__(params_general)
    self.viewer = viewer
    self.update_napari_signal.connect(self.update_napari)

update_napari(layer_name, shapes, labels)

Source code in src\MVSRegistrationNapari.py
@Slot(str, list, list)
def update_napari(self, layer_name, shapes, labels):
    if len(shapes) > 0:
        text = {'string': '{labels}'}
        features = {'labels': labels}
        self.viewer.add_shapes(shapes, name=layer_name, text=text, features=features, opacity=0.5)
        self.viewer.show()

Pipeline

Pipeline

Bases: Thread

Source code in src\Pipeline.py
class Pipeline(Thread):
    def __init__(self, params, viewer=None):
        super().__init__()
        self.params = params
        self.viewer = viewer

        self.params_general = params['general']
        self.init_logging()

        napari_ui = 'napari' in self.params_general.get('ui', '')
        if napari_ui:
            from src.MVSRegistrationNapari import MVSRegistrationNapari
            self.mvs_registration = MVSRegistrationNapari(self.params_general, self.viewer)
        else:
            from src.MVSRegistration import MVSRegistration
            self.mvs_registration = MVSRegistration(self.params_general)

    def init_logging(self):
        params_logging = self.params_general.get('logging', {})
        self.log_filename = params_logging.get('filename', 'muvis-align.log')
        self.verbose = params_logging.get('verbose', False)
        logging_mvs = params_logging.get('mvs', False)
        log_format = params_logging.get('format')
        basepath = os.path.dirname(self.log_filename)
        if basepath and not os.path.exists(basepath):  # dirname is '' for a plain filename like the default
            os.makedirs(basepath)

        handlers = [logging.FileHandler(self.log_filename, encoding='utf-8')]
        if self.verbose:
            handlers += [logging.StreamHandler()]
        logging.basicConfig(level=logging.INFO, format=log_format, handlers=handlers, encoding='utf-8')

        # verbose external modules
        if logging_mvs:
            # expose multiview_stitcher.registration logger and make more verbose
            mvsr_logger = logging.getLogger('multiview_stitcher.registration')
            mvsr_logger.setLevel(logging.INFO)
            if len(mvsr_logger.handlers) == 0:
                mvsr_logger.addHandler(logging.StreamHandler())
        else:
            # reduce verbose level
            for module in ['multiview_stitcher', 'multiview_stitcher.registration', 'multiview_stitcher.fusion']:
                logging.getLogger(module).setLevel(logging.WARNING)

        for module in ['ome_zarr']:
            logging.getLogger(module).setLevel(logging.WARNING)

        logging.info(f'muvis-align version {version}')

    def run(self):
        break_on_error = self.params_general.get('break_on_error', False)
        for operation_params in tqdm(self.params['operations']):
            error = False
            input_path = operation_params['input']
            logging.info(f'Input: {input_path}')
            try:
                self.run_operation(operation_params)
            except Exception as e:
                logging.exception(f'Error processing: {input_path}')
                print(f'Error processing: {input_path}: {e}')
                error = True

            if error and break_on_error:
                break

        logging.info('Done!')

    def run_operation(self, params):
        operation = params['operation']
        use_global_metadata = 'global' in params.get('source_metadata', '')
        metadata_summary = self.params_general.get('metadata_summary', False)

        filenames = dir_regex(params['input'])
        filenames = sorted(filenames, key=lambda file: list(find_all_numbers(file)))    # sort by the numeric parts of the filename
        if len(filenames) == 0:
            logging.warning(f'Skipping operation {operation} (no files)')
            return False
        elif self.verbose:
            logging.info(f'# total files: {len(filenames)}')

        operation_parts = operation.split()
        if 'match' in operation_parts:
            # check if match label provided
            index = operation_parts.index('match') + 1
            if index < len(operation_parts):
                match_label = operation_parts[index]
            else:
                match_label = None
            matches = {}
            for filename in filenames:
                parts = split_numeric_dict(filename)
                match_value = parts.get(match_label)
                if match_value is not None:
                    if match_value.isdecimal():
                        match_value = int(match_value)
                    if match_value not in matches:
                        matches[match_value] = []
                    matches[match_value].append(filename)
            if len(matches) == 0:
                # fallback after the loop: no filename matched the label, use all files as one set
                matches[0] = filenames
            filesets = []
            fileset_labels = []
            for label in sorted(matches):
                filesets.append(matches[label])
                fileset_labels.append(f'{match_label}:{label}')
            logging.info(f'# matched file sets: {len(filesets)}')
        else:
            filesets = [filenames]
            fileset_labels = [get_filetitle(filename) for filename in filenames]

        metadatas = []
        rotations = []
        global_center = None
        if metadata_summary or use_global_metadata:
            for fileset, fileset_label in zip(filesets, fileset_labels):
                metadata = get_images_metadata(fileset, params.get('source_metadata'))
                if metadata_summary:
                    logging.info(f'File set: {fileset_label} metadata:\n' + metadata['summary'])
                metadatas.append(metadata)
            if use_global_metadata:
                global_center = np.mean([metadata['center'] for metadata in metadatas], 0)
                rotations = [metadata['rotation'] for metadata in metadatas]
                # fix missing rotation values
                rotations = pd.Series(rotations).interpolate(limit_direction='both').to_numpy()

        ok = False
        for index, (fileset, fileset_label) in enumerate(zip(filesets, fileset_labels)):
            if len(filesets) > 1:
                logging.info(f'File set: {fileset_label}')
            center = global_center if use_global_metadata else None
            rotation = rotations[index] if use_global_metadata else None
            ok |= self.mvs_registration.run_operation(fileset_label, fileset, params,
                                                      global_center=center, global_rotation=rotation)

        return ok

mvs_registration = MVSRegistrationNapari(self.params_general, self.viewer) instance-attribute

params = params instance-attribute

params_general = params['general'] instance-attribute

viewer = viewer instance-attribute

__init__(params, viewer=None)

Source code in src\Pipeline.py
def __init__(self, params, viewer=None):
    super().__init__()
    self.params = params
    self.viewer = viewer

    self.params_general = params['general']
    self.init_logging()

    napari_ui = 'napari' in self.params_general.get('ui', '')
    if napari_ui:
        from src.MVSRegistrationNapari import MVSRegistrationNapari
        self.mvs_registration = MVSRegistrationNapari(self.params_general, self.viewer)
    else:
        from src.MVSRegistration import MVSRegistration
        self.mvs_registration = MVSRegistration(self.params_general)

init_logging()

Source code in src\Pipeline.py
def init_logging(self):
    params_logging = self.params_general.get('logging', {})
    self.log_filename = params_logging.get('filename', 'muvis-align.log')
    self.verbose = params_logging.get('verbose', False)
    logging_mvs = params_logging.get('mvs', False)
    log_format = params_logging.get('format')
    basepath = os.path.dirname(self.log_filename)
    if basepath and not os.path.exists(basepath):  # dirname is '' for a plain filename like the default
        os.makedirs(basepath)

    handlers = [logging.FileHandler(self.log_filename, encoding='utf-8')]
    if self.verbose:
        handlers += [logging.StreamHandler()]
    logging.basicConfig(level=logging.INFO, format=log_format, handlers=handlers, encoding='utf-8')

    # verbose external modules
    if logging_mvs:
        # expose multiview_stitcher.registration logger and make more verbose
        mvsr_logger = logging.getLogger('multiview_stitcher.registration')
        mvsr_logger.setLevel(logging.INFO)
        if len(mvsr_logger.handlers) == 0:
            mvsr_logger.addHandler(logging.StreamHandler())
    else:
        # reduce verbose level
        for module in ['multiview_stitcher', 'multiview_stitcher.registration', 'multiview_stitcher.fusion']:
            logging.getLogger(module).setLevel(logging.WARNING)

    for module in ['ome_zarr']:
        logging.getLogger(module).setLevel(logging.WARNING)

    logging.info(f'muvis-align version {version}')

run()

Source code in src\Pipeline.py
def run(self):
    break_on_error = self.params_general.get('break_on_error', False)
    for operation_params in tqdm(self.params['operations']):
        error = False
        input_path = operation_params['input']
        logging.info(f'Input: {input_path}')
        try:
            self.run_operation(operation_params)
        except Exception as e:
            logging.exception(f'Error processing: {input_path}')
            print(f'Error processing: {input_path}: {e}')
            error = True

        if error and break_on_error:
            break

    logging.info('Done!')

run_operation(params)

Source code in src\Pipeline.py
def run_operation(self, params):
    operation = params['operation']
    use_global_metadata = 'global' in params.get('source_metadata', '')
    metadata_summary = self.params_general.get('metadata_summary', False)

    filenames = dir_regex(params['input'])
    filenames = sorted(filenames, key=lambda file: list(find_all_numbers(file)))    # sort by the numeric parts of the filename
    if len(filenames) == 0:
        logging.warning(f'Skipping operation {operation} (no files)')
        return False
    elif self.verbose:
        logging.info(f'# total files: {len(filenames)}')

    operation_parts = operation.split()
    if 'match' in operation_parts:
        # check if match label provided
        index = operation_parts.index('match') + 1
        if index < len(operation_parts):
            match_label = operation_parts[index]
        else:
            match_label = None
        matches = {}
        for filename in filenames:
            parts = split_numeric_dict(filename)
            match_value = parts.get(match_label)
            if match_value is not None:
                if match_value.isdecimal():
                    match_value = int(match_value)
                if match_value not in matches:
                    matches[match_value] = []
                matches[match_value].append(filename)
        if len(matches) == 0:
            # fallback after the loop: no filename matched the label, use all files as one set
            matches[0] = filenames
        filesets = []
        fileset_labels = []
        for label in sorted(matches):
            filesets.append(matches[label])
            fileset_labels.append(f'{match_label}:{label}')
        logging.info(f'# matched file sets: {len(filesets)}')
    else:
        filesets = [filenames]
        fileset_labels = [get_filetitle(filename) for filename in filenames]

    metadatas = []
    rotations = []
    global_center = None
    if metadata_summary or use_global_metadata:
        for fileset, fileset_label in zip(filesets, fileset_labels):
            metadata = get_images_metadata(fileset, params.get('source_metadata'))
            if metadata_summary:
                logging.info(f'File set: {fileset_label} metadata:\n' + metadata['summary'])
            metadatas.append(metadata)
        if use_global_metadata:
            global_center = np.mean([metadata['center'] for metadata in metadatas], 0)
            rotations = [metadata['rotation'] for metadata in metadatas]
            # fix missing rotation values
            rotations = pd.Series(rotations).interpolate(limit_direction='both').to_numpy()

    ok = False
    for index, (fileset, fileset_label) in enumerate(zip(filesets, fileset_labels)):
        if len(filesets) > 1:
            logging.info(f'File set: {fileset_label}')
        center = global_center if use_global_metadata else None
        rotation = rotations[index] if use_global_metadata else None
        ok |= self.mvs_registration.run_operation(fileset_label, fileset, params,
                                                  global_center=center, global_rotation=rotation)

    return ok
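
For orientation, a minimal params structure covering the keys read above might look as follows (an illustrative sketch; the operation name 'register' and the file paths are assumptions, not taken from the source):

params = {
    'general': {
        'logging': {'filename': 'log/muvis-align.log', 'verbose': True},
        'break_on_error': False,
    },
    'operations': [
        # 'match s' groups the files by the numeric value labelled 's' in their filenames
        {'operation': 'register match s', 'input': 'data/tiles_s*_t*.ome.tiff'},
    ],
}
Pipeline(params).start()    # Pipeline subclasses Thread, so start() runs run() in a worker thread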

Timer

Timer

Bases: object

Source code in src\Timer.py
class Timer(object):
    def __init__(self, title, auto_unit=True, verbose=True):
        self.title = title
        self.auto_unit = auto_unit
        self.verbose = verbose

    def __enter__(self):
        self.ptime_start = time.process_time()
        self.time_start = time.time()

    def __exit__(self, type, value, traceback):
        if self.verbose:
            ptime_end = time.process_time()
            time_end = time.time()
            pelapsed = ptime_end - self.ptime_start
            elapsed = time_end - self.time_start
            unit = 'seconds'
            if self.auto_unit and elapsed >= 60:
                pelapsed /= 60
                elapsed /= 60
                unit = 'minutes'
                if elapsed >= 60:
                    pelapsed /= 60
                    elapsed /= 60
                    unit = 'hours'
            logging.info(f'Time {self.title}: {elapsed:.1f} ({pelapsed:.1f}) {unit}')

auto_unit = auto_unit instance-attribute

title = title instance-attribute

verbose = verbose instance-attribute

__enter__()

Source code in src\Timer.py
def __enter__(self):
    self.ptime_start = time.process_time()
    self.time_start = time.time()

__exit__(type, value, traceback)

Source code in src\Timer.py
def __exit__(self, type, value, traceback):
    if self.verbose:
        ptime_end = time.process_time()
        time_end = time.time()
        pelapsed = ptime_end - self.ptime_start
        elapsed = time_end - self.time_start
        unit = 'seconds'
        if self.auto_unit and elapsed >= 60:
            pelapsed /= 60
            elapsed /= 60
            unit = 'minutes'
            if elapsed >= 60:
                pelapsed /= 60
                elapsed /= 60
                unit = 'hours'
        logging.info(f'Time {self.title}: {elapsed:.1f} ({pelapsed:.1f}) {unit}')

__init__(title, auto_unit=True, verbose=True)

Source code in src\Timer.py
def __init__(self, title, auto_unit=True, verbose=True):
    self.title = title
    self.auto_unit = auto_unit
    self.verbose = verbose
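
Illustrative usage (added example, not from the source; process_data is a hypothetical workload):

with Timer('fusion'):
    process_data()
# logs e.g. 'Time fusion: 12.3 (11.8) seconds' (wall time, with process time in parentheses)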

constants

tiff_extension = '.ome.tiff' module-attribute

version = '0.2.10' module-attribute

zarr_extension = '.ome.zarr' module-attribute

image

DaskSource

DaskSource

Source code in src\image\DaskSource.py
class DaskSource:
    default_physical_unit = 'µm'

    def __init__(self, filename, source_metadata=None):
        self.filename = filename
        self.dimension_order = ''
        self.is_rgb = False
        self.shapes = []
        self.shape = []
        self.dtype = None
        self.pixel_sizes = []
        self.pixel_size = {}
        self.scales = []
        self.positions = []
        self.position = {}
        self.rotation = 0
        self.channels = []
        self.init_metadata()
        self.fix_metadata(source_metadata)

    def init_metadata(self):
        raise NotImplementedError("Dask source should implement init_metadata() to initialize metadata")

    def fix_metadata(self, source_metadata=None):
        if isinstance(source_metadata, dict):
            filename_numeric = find_all_numbers(self.filename)
            filename_dict = {key: int(value) for key, value in split_numeric_dict(self.filename).items()}
            context = {'filename_numeric': filename_numeric, 'fn': filename_numeric} | filename_dict
            if 'position' in source_metadata:
                translation0 = source_metadata['position']
                if 'x' in translation0:
                    self.position['x'] = eval_context(translation0, 'x', 0, context)
                if 'y' in translation0:
                    self.position['y'] = eval_context(translation0, 'y', 0, context)
                if 'z' in translation0:
                    self.position['z'] = eval_context(translation0, 'z', 0, context)
            if 'scale' in source_metadata:
                scale0 = source_metadata['scale']
                if 'x' in scale0:
                    self.pixel_size['x'] = eval_context(scale0, 'x', 1, context)
                if 'y' in scale0:
                    self.pixel_size['y'] = eval_context(scale0, 'y', 1, context)
                if 'z' in scale0:
                    self.pixel_size['z'] = eval_context(scale0, 'z', 1, context)
            if 'rotation' in source_metadata:
                self.rotation = source_metadata['rotation']

        for shape in self.shapes:
            scale1 = []
            for dim in 'xy':
                index = self.dimension_order.index(dim)
                scale1.append(self.shape[index] / shape[index])
            self.scales.append(np.mean(scale1))

    def get_shape(self, level=0):
        # shape in pixels
        return self.shapes[level]

    def get_size(self, level=0):
        # size in pixels
        return {dim: size for dim, size in zip(self.dimension_order, self.get_shape(level))}

    def get_pixel_size(self, level=0):
        # pixel size in micrometers
        if self.pixel_sizes:
            pixel_size = get_value_units_micrometer(self.pixel_sizes[level])
        else:
            scale = self.scales[level]
            pixel_size0 = get_value_units_micrometer(self.pixel_size)
            pixel_size = {dim: size * scale for dim, size in pixel_size0.items()}
        return pixel_size

    def get_physical_size(self):
        pixel_size = self.get_pixel_size()
        size = self.get_size()
        return {dim: size[dim] * pixel_size[dim] for dim in pixel_size.keys() if dim in size}

    def get_position(self, level=0):
        # position in micrometers
        if self.positions:
            return get_value_units_micrometer(self.positions[level])
        else:
            return get_value_units_micrometer(self.position)

    def get_rotation(self):
        # rotation in degrees
        return self.rotation

    def get_nchannels(self):
        return self.get_size().get('c', 1)

    def get_channels(self):
        if len(self.channels) == 0:
            if self.is_rgb:
                return [{'label': ''}]
            else:
                return [{'label': ''}] * self.get_nchannels()
        return self.channels

    def get_data(self, level=0):
        raise NotImplementedError()
channels = [] instance-attribute
default_physical_unit = 'µm' class-attribute instance-attribute
dimension_order = '' instance-attribute
dtype = None instance-attribute
filename = filename instance-attribute
is_rgb = False instance-attribute
pixel_size = {} instance-attribute
pixel_sizes = [] instance-attribute
position = {} instance-attribute
positions = [] instance-attribute
rotation = 0 instance-attribute
scales = [] instance-attribute
shape = [] instance-attribute
shapes = [] instance-attribute
__init__(filename, source_metadata=None)
Source code in src\image\DaskSource.py
def __init__(self, filename, source_metadata=None):
    self.filename = filename
    self.dimension_order = ''
    self.is_rgb = False
    self.shapes = []
    self.shape = []
    self.dtype = None
    self.pixel_sizes = []
    self.pixel_size = {}
    self.scales = []
    self.positions = []
    self.position = {}
    self.rotation = 0
    self.channels = []
    self.init_metadata()
    self.fix_metadata(source_metadata)
fix_metadata(source_metadata=None)
Source code in src\image\DaskSource.py
def fix_metadata(self, source_metadata=None):
    if isinstance(source_metadata, dict):
        filename_numeric = find_all_numbers(self.filename)
        filename_dict = {key: int(value) for key, value in split_numeric_dict(self.filename).items()}
        context = {'filename_numeric': filename_numeric, 'fn': filename_numeric} | filename_dict
        if 'position' in source_metadata:
            translation0 = source_metadata['position']
            if 'x' in translation0:
                self.position['x'] = eval_context(translation0, 'x', 0, context)
            if 'y' in translation0:
                self.position['y'] = eval_context(translation0, 'y', 0, context)
            if 'z' in translation0:
                self.position['z'] = eval_context(translation0, 'z', 0, context)
        if 'scale' in source_metadata:
            scale0 = source_metadata['scale']
            if 'x' in scale0:
                self.pixel_size['x'] = eval_context(scale0, 'x', 1, context)
            if 'y' in scale0:
                self.pixel_size['y'] = eval_context(scale0, 'y', 1, context)
            if 'z' in scale0:
                self.pixel_size['z'] = eval_context(scale0, 'z', 1, context)
        if 'rotation' in source_metadata:
            self.rotation = source_metadata['rotation']

    for shape in self.shapes:
        scale1 = []
        for dim in 'xy':
            index = self.dimension_order.index(dim)
            scale1.append(self.shape[index] / shape[index])
        self.scales.append(np.mean(scale1))
get_channels()
Source code in src\image\DaskSource.py
def get_channels(self):
    if len(self.channels) == 0:
        if self.is_rgb:
            return [{'label': ''}]
        else:
            return [{'label': ''}] * self.get_nchannels()
    return self.channels
get_data(level=0)
Source code in src\image\DaskSource.py
def get_data(self, level=0):
    raise NotImplementedError()
get_nchannels()
Source code in src\image\DaskSource.py
def get_nchannels(self):
    return self.get_size().get('c', 1)
get_physical_size()
Source code in src\image\DaskSource.py
def get_physical_size(self):
    pixel_size = self.get_pixel_size()
    size = self.get_size()
    return {dim: size[dim] * pixel_size[dim] for dim in pixel_size.keys() if dim in size}
get_pixel_size(level=0)
Source code in src\image\DaskSource.py
def get_pixel_size(self, level=0):
    # pixel size in micrometers
    if self.pixel_sizes:
        pixel_size = get_value_units_micrometer(self.pixel_sizes[level])
    else:
        scale = self.scales[level]
        pixel_size0 = get_value_units_micrometer(self.pixel_size)
        pixel_size = {dim: size * scale for dim, size in pixel_size0.items()}
    return pixel_size
get_position(level=0)
Source code in src\image\DaskSource.py
def get_position(self, level=0):
    # position in micrometers
    if self.positions:
        return get_value_units_micrometer(self.positions[level])
    else:
        return get_value_units_micrometer(self.position)
get_rotation()
Source code in src\image\DaskSource.py
def get_rotation(self):
    # rotation in degrees
    return self.rotation
get_shape(level=0)
Source code in src\image\DaskSource.py
def get_shape(self, level=0):
    # shape in pixels
    return self.shapes[level]
get_size(level=0)
Source code in src\image\DaskSource.py
def get_size(self, level=0):
    # size in pixels
    return {dim: size for dim, size in zip(self.dimension_order, self.get_shape(level))}
init_metadata()
Source code in src\image\DaskSource.py
def init_metadata(self):
    raise NotImplementedError("Dask source should implement init_metadata() to initialize metadata")

TiffDaskSource

TiffDaskSource

Bases: DaskSource

Source code in src\image\TiffDaskSource.py
class TiffDaskSource(DaskSource):
    def init_metadata(self):
        tiff = tifffile.TiffFile(self.filename)
        pages = []
        if tiff.series and not tiff.is_mmstack:
            for level in tiff.series[0].levels:
                pages.append(level.pages[0])
        if len(pages) == 0:
            pages = tiff.pages
        page0 = pages[0]
        self.shapes = [page.shape for page in pages]
        self.shape = self.shapes[0]
        self.dtype = page0.dtype.type
        self.dimension_order = page0.axes.lower()
        photometric = page0.keyframe.photometric
        nchannels = self.get_nchannels()
        self.is_rgb = (photometric in (PHOTOMETRIC.RGB, PHOTOMETRIC.PALETTE) and nchannels in (3, 4))

        pixel_size = {}
        position = {}
        rotation = None
        channels = []
        if tiff.is_ome and tiff.ome_metadata is not None:
            xml_metadata = tiff.ome_metadata
            self.metadata = tifffile.xml2dict(xml_metadata)
            if 'OME' in self.metadata:
                self.metadata = self.metadata['OME']

                images = ensure_list(self.metadata.get('Image', {}))[0]
                pixels = images.get('Pixels', {})
                size = float(pixels.get('PhysicalSizeX', 0))
                if size:
                    pixel_size['x'] = (size, pixels.get('PhysicalSizeXUnit', self.default_physical_unit))
                size = float(pixels.get('PhysicalSizeY', 0))
                if size:
                    pixel_size['y'] = (size, pixels.get('PhysicalSizeYUnit', self.default_physical_unit))
                size = float(pixels.get('PhysicalSizeZ', 0))
                if size:
                    pixel_size['z'] = (size, pixels.get('PhysicalSizeZUnit', self.default_physical_unit))

                for plane in ensure_list(pixels.get('Plane', [])):
                    if 'PositionX' in plane:
                        position['x'] = (float(plane.get('PositionX')), plane.get('PositionXUnit', self.default_physical_unit))
                    if 'PositionY' in plane:
                        position['y'] = (float(plane.get('PositionY')), plane.get('PositionYUnit', self.default_physical_unit))
                    if 'PositionZ' in plane:
                        position['z'] = (float(plane.get('PositionZ')), plane.get('PositionZUnit', self.default_physical_unit))
                    # c, z, t = plane.get('TheC'), plane.get('TheZ'), plane.get('TheT')

                annotations = self.metadata.get('StructuredAnnotations')
                if annotations is not None:
                    if not isinstance(annotations, (list, tuple)):
                        annotations = [annotations]
                    for annotation_item in annotations:
                        for annotations2 in annotation_item.values():
                            if not isinstance(annotations2, (list, tuple)):
                                annotations2 = [annotations2]
                            for annotation in annotations2:
                                value = annotation.get('Value')
                                unit = None
                                if isinstance(value, dict) and 'Modulo' in value:
                                    modulo = value.get('Modulo', {}).get('ModuloAlongZ', {})
                                    unit = modulo.get('Unit')
                                    value = modulo.get('Label')
                                elif isinstance(value, str) and value.lower().startswith('angle'):
                                    if ':' in value:
                                        value = value.split(':')[1].split()
                                    elif '=' in value:
                                        value = value.split('=')[1].split()
                                    else:
                                        value = value.split()[1:]
                                    if len(value) >= 2:
                                        unit = value[1]
                                    value = value[0]
                                else:
                                    value = None
                                if value is not None:
                                    rotation = float(value)
                                    if unit and 'rad' in unit.lower():  # unit may be missing
                                        rotation = np.rad2deg(rotation)

                for channel0 in ensure_list(pixels.get('Channel', [])):
                    channel = {'label': channel0.get('Name', '')}
                    color = channel0.get('Color')
                    if color:
                        channel['color'] = int_to_rgba(int(color))
                    channels.append(channel)
        self.pixel_size = pixel_size
        self.position = position
        self.rotation = rotation
        self.channels = channels

    def get_data(self, level=0):
        lazy_array = dask.delayed(tifffile.imread)(self.filename, level=level)
        dask_data = dask.array.from_delayed(lazy_array, shape=self.shapes[level], dtype=self.dtype)
        return dask_data
get_data(level=0)
Source code in src\image\TiffDaskSource.py
def get_data(self, level=0):
    lazy_array = dask.delayed(tifffile.imread)(self.filename, level=level)
    dask_data = dask.array.from_delayed(lazy_array, shape=self.shapes[level], dtype=self.dtype)
    return dask_data
init_metadata()
Source code in src\image\TiffDaskSource.py
def init_metadata(self):
    tiff = tifffile.TiffFile(self.filename)
    pages = []
    if tiff.series and not tiff.is_mmstack:
        for level in tiff.series[0].levels:
            pages.append(level.pages[0])
    if len(pages) == 0:
        pages = tiff.pages
    page0 = pages[0]
    self.shapes = [page.shape for page in pages]
    self.shape = self.shapes[0]
    self.dtype = page0.dtype.type
    self.dimension_order = page0.axes.lower()
    photometric = page0.keyframe.photometric
    nchannels = self.get_nchannels()
    self.is_rgb = (photometric in (PHOTOMETRIC.RGB, PHOTOMETRIC.PALETTE) and nchannels in (3, 4))

    pixel_size = {}
    position = {}
    rotation = None
    channels = []
    if tiff.is_ome and tiff.ome_metadata is not None:
        xml_metadata = tiff.ome_metadata
        self.metadata = tifffile.xml2dict(xml_metadata)
        if 'OME' in self.metadata:
            self.metadata = self.metadata['OME']

            images = ensure_list(self.metadata.get('Image', {}))[0]
            pixels = images.get('Pixels', {})
            size = float(pixels.get('PhysicalSizeX', 0))
            if size:
                pixel_size['x'] = (size, pixels.get('PhysicalSizeXUnit', self.default_physical_unit))
            size = float(pixels.get('PhysicalSizeY', 0))
            if size:
                pixel_size['y'] = (size, pixels.get('PhysicalSizeYUnit', self.default_physical_unit))
            size = float(pixels.get('PhysicalSizeZ', 0))
            if size:
                pixel_size['z'] = (size, pixels.get('PhysicalSizeZUnit', self.default_physical_unit))

            for plane in ensure_list(pixels.get('Plane', [])):
                if 'PositionX' in plane:
                    position['x'] = (float(plane.get('PositionX')), plane.get('PositionXUnit', self.default_physical_unit))
                if 'PositionY' in plane:
                    position['y'] = (float(plane.get('PositionY')), plane.get('PositionYUnit', self.default_physical_unit))
                if 'PositionZ' in plane:
                    position['z'] = (float(plane.get('PositionZ')), plane.get('PositionZUnit', self.default_physical_unit))
                # c, z, t = plane.get('TheC'), plane.get('TheZ'), plane.get('TheT')

            annotations = self.metadata.get('StructuredAnnotations')
            if annotations is not None:
                if not isinstance(annotations, (list, tuple)):
                    annotations = [annotations]
                for annotation_item in annotations:
                    for annotations2 in annotation_item.values():
                        if not isinstance(annotations2, (list, tuple)):
                            annotations2 = [annotations2]
                        for annotation in annotations2:
                            value = annotation.get('Value')
                            unit = None
                            if isinstance(value, dict) and 'Modulo' in value:
                                modulo = value.get('Modulo', {}).get('ModuloAlongZ', {})
                                unit = modulo.get('Unit')
                                value = modulo.get('Label')
                            elif isinstance(value, str) and value.lower().startswith('angle'):
                                if ':' in value:
                                    value = value.split(':')[1].split()
                                elif '=' in value:
                                    value = value.split('=')[1].split()
                                else:
                                    value = value.split()[1:]
                                if len(value) >= 2:
                                    unit = value[1]
                                value = value[0]
                            else:
                                value = None
                            if value is not None:
                                rotation = float(value)
                                if unit and 'rad' in unit.lower():  # unit may be missing
                                    rotation = np.rad2deg(rotation)

            for channel0 in ensure_list(pixels.get('Channel', [])):
                channel = {'label': channel0.get('Name', '')}
                color = channel0.get('Color')
                if color:
                    channel['color'] = int_to_rgba(int(color))
                channels.append(channel)
    self.pixel_size = pixel_size
    self.position = position
    self.rotation = rotation
    self.channels = channels

Video

Video

Source code in src\image\Video.py
class Video:
    def __init__(self, filename, fps=1, scale=1, size=None):
        self.filename = filename
        self.fps = fps
        self.scale = scale
        self.size = size
        self.min = 0
        self.range = 1
        self.vidwriter = None

    def write(self, frame):
        frame = frame.squeeze()
        if self.scale != 1:
            frame = cv.resize(np.asarray(frame), None, fx=self.scale, fy=self.scale)
        if self.vidwriter is None:
            if self.size is None:
                height, width = frame.shape[:2]
                self.size = (width, height)
            self.vidwriter = cv.VideoWriter(self.filename, -1, self.fps, self.size)
        frame = image_reshape(frame, self.size)
        self.vidwriter.write(np.asarray(frame))

    def close(self):
        if self.vidwriter is not None:
            self.vidwriter.release()
filename = filename instance-attribute
fps = fps instance-attribute
min = 0 instance-attribute
range = 1 instance-attribute
scale = scale instance-attribute
size = size instance-attribute
vidwriter = None instance-attribute
__init__(filename, fps=1, scale=1, size=None)
Source code in src\image\Video.py
def __init__(self, filename, fps=1, scale=1, size=None):
    self.filename = filename
    self.fps = fps
    self.scale = scale
    self.size = size
    self.min = 0
    self.range = 1
    self.vidwriter = None
close()
Source code in src\image\Video.py
def close(self):
    if self.vidwriter is not None:
        self.vidwriter.release()
write(frame)
Source code in src\image\Video.py
def write(self, frame):
    frame = frame.squeeze()
    if self.scale != 1:
        frame = cv.resize(np.asarray(frame), None, fx=self.scale, fy=self.scale)
    if self.vidwriter is None:
        if self.size is None:
            height, width = frame.shape[:2]
            self.size = (width, height)
        self.vidwriter = cv.VideoWriter(self.filename, -1, self.fps, self.size)
    frame = image_reshape(frame, self.size)
    self.vidwriter.write(np.asarray(frame))
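
Illustrative usage (added example, not from the source; frames is assumed to be an iterable of HxW(x3) uint8 arrays):

video = Video('out.avi', fps=5)
for frame in frames:
    video.write(frame)    # the writer is created lazily from the first frame's size
video.close()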

apply_transform(points, transform)

Source code in src\util.py
def apply_transform(points, transform):
    new_points = []
    for point in points:
        point_len = len(point)
        while len(point) < len(transform):
            point = list(point) + [1]
        new_point = np.dot(point, np.transpose(np.array(transform)))
        new_points.append(new_point[:point_len])
    return new_points
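
Illustrative usage (added example, not from the source): points are padded with ones, so a 3x3 homogeneous matrix translates 2D points.

import numpy as np
translation = np.array([[1, 0, 10],
                        [0, 1, 5],
                        [0, 0, 1]])
apply_transform([(1, 2)], translation)    # -> [array([11, 7])]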

blur_image(image, sigma)

Source code in src\image\util.py
def blur_image(image, sigma):
    nchannels = image.shape[2] if image.ndim == 3 else 1
    if nchannels not in [1, 3]:
        new_image = np.zeros_like(image)
        for channeli in range(nchannels):
            new_image[..., channeli] = blur_image_single(image[..., channeli], sigma)
    else:
        new_image = blur_image_single(image, sigma)
    return new_image

blur_image_single(image, sigma)

Source code in src\image\util.py
def blur_image_single(image, sigma):
    return gaussian_filter(image, sigma)

calc_foreground_map(sims)

Source code in src\image\util.py
def calc_foreground_map(sims):
    if len(sims) <= 2:
        return [True] * len(sims)
    sims = [sim.squeeze().astype(np.float32) for sim in sims]
    median_image = calc_images_median(sims).astype(np.float32)
    difs = [np.mean(np.abs(sim - median_image), (0, 1)) for sim in sims]
    # or use stddev instead of mean?
    threshold = np.mean(difs, 0)
    #threshold, _ = cv.threshold(np.array(difs).astype(np.uint16), 0, 1, cv.THRESH_OTSU)
    #threshold, foregrounds = filter_noise_images(channel_images)
    foreground_map = np.asarray(difs) > threshold
    if not np.any(foreground_map):
        # all tiles classed as background: treat everything as foreground instead
        return [True] * len(sims)
    return foreground_map

calc_images_median(images)

Source code in src\image\util.py
def calc_images_median(images):
    out_image = np.zeros(shape=images[0].shape, dtype=images[0].dtype)
    median_image = np.median(images, 0, out_image)
    return median_image

calc_images_quantiles(images, quantiles)

Source code in src\image\util.py
def calc_images_quantiles(images, quantiles):
    quantile_images = [image.astype(np.float32) for image in np.quantile(images, quantiles, 0)]
    return quantile_images

calc_output_properties(sims, transform_key, z_scale=None)

Source code in src\image\util.py
def calc_output_properties(sims, transform_key, z_scale=None):
    output_spacing = si_utils.get_spacing_from_sim(sims[0])
    if z_scale is not None:
        output_spacing['z'] = z_scale
    output_properties = fusion.calc_fusion_stack_properties(
        sims,
        [si_utils.get_affine_from_sim(sim, transform_key) for sim in sims],
        output_spacing,
        mode='union',
    )
    return output_properties

calc_pyramid(xyzct, npyramid_add=0, pyramid_downsample=2, volumetric_resize=False)

Source code in src\image\util.py
def calc_pyramid(xyzct: tuple, npyramid_add: int = 0, pyramid_downsample: float = 2,
                 volumetric_resize: bool = False) -> list:
    x, y, z, c, t = xyzct
    if volumetric_resize and z > 1:
        size = (x, y, z)
    else:
        size = (x, y)
    sizes_add = []
    scale = 1
    for _ in range(npyramid_add):
        scale /= pyramid_downsample
        scaled_size = np.maximum(np.round(np.multiply(size, scale)).astype(int), 1)
        sizes_add.append(scaled_size)
    return sizes_add
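
Illustrative usage (added example, not from the source): two extra pyramid levels for a 1024x1024 single-plane image.

calc_pyramid((1024, 1024, 1, 1, 1), npyramid_add=2)
# -> [array([512, 512]), array([256, 256])]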

check_round_significants(a, significant_digits)

Source code in src\util.py
def check_round_significants(a: float, significant_digits: int) -> float:
    rounded = round_significants(a, significant_digits)
    if a != 0:
        dif = 1 - rounded / a
    else:
        dif = rounded - a
    if abs(dif) < 10 ** -significant_digits:
        return rounded
    return a

color_image(image)

Source code in src\image\util.py
def color_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 1:
        return cv.cvtColor(np.array(image), cv.COLOR_GRAY2RGB)
    else:
        return image

combine_transforms(transforms)

Source code in src\image\util.py
709
710
711
712
713
714
715
716
def combine_transforms(transforms):
    combined_transform = None
    for transform in transforms:
        if combined_transform is None:
            combined_transform = transform
        else:
            combined_transform = np.dot(transform, combined_transform)
    return combined_transform
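
Illustrative usage (added example, not from the source): transforms are composed so that the first list entry is applied first.

import numpy as np
translate = np.array([[1, 0, 10], [0, 1, 0], [0, 0, 1]])
scale = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 1]])
combine_transforms([translate, scale]) @ [1, 0, 1]    # -> array([22, 0, 1])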

convert_image_sign_type(image, target_dtype)

Source code in src\image\util.py
def convert_image_sign_type(image: np.ndarray, target_dtype: np.dtype) -> np.ndarray:
    source_dtype = image.dtype
    if source_dtype.kind == target_dtype.kind:
        new_image = image
    elif source_dtype.kind == 'i':
        new_image = ensure_unsigned_image(image)
    else:
        # conversion without overhead
        offset = 2 ** (8 * target_dtype.itemsize - 1)
        new_image = (image - offset).astype(target_dtype)
    return new_image

convert_rational_value(value)

Source code in src\util.py
def convert_rational_value(value) -> float:
    if value is not None and isinstance(value, tuple):
        if value[0] == value[1]:
            value = value[0]
        else:
            value = value[0] / value[1]
    return value

convert_to_um(value, unit)

Source code in src\util.py
def convert_to_um(value, unit):
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    return value * conversions.get(unit, 1)
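
Example (unrecognised units fall back to a conversion factor of 1):

convert_to_um(1.5, 'mm')   # -> 1500.0
convert_to_um(250, 'nm')   # -> 0.25
convert_to_um(10, 'px')    # -> 10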

create_compression_filter(compression)

Source code in src\image\util.py
def create_compression_filter(compression: list) -> tuple:
    compressor, compression_filters = None, None
    compression = ensure_list(compression)
    if compression is not None and len(compression) > 0:
        compression_type = compression[0].lower()
        if len(compression) > 1:
            level = int(compression[1])
        else:
            level = None
        if 'lzw' in compression_type:
            from imagecodecs.numcodecs import Lzw
            compression_filters = [Lzw()]
        elif '2k' in compression_type or '2000' in compression_type:
            from imagecodecs.numcodecs import Jpeg2k
            compression_filters = [Jpeg2k(level=level)]
        elif 'jpegls' in compression_type:
            from imagecodecs.numcodecs import Jpegls
            compression_filters = [Jpegls(level=level)]
        elif 'jpegxr' in compression_type:
            from imagecodecs.numcodecs import Jpegxr
            compression_filters = [Jpegxr(level=level)]
        elif 'jpegxl' in compression_type:
            from imagecodecs.numcodecs import Jpegxl
            compression_filters = [Jpegxl(level=level)]
        else:
            compressor = compression
    return compressor, compression_filters

create_transform(center, angle, matrix_size=3)

Source code in src\util.py
def create_transform(center, angle, matrix_size=3):
    if isinstance(center, dict):
        center = dict_to_xyz(center)
    if len(center) == 2:
        center = np.array(list(center) + [0])
    if angle is None:
        angle = 0
    r = Rotation.from_euler('z', angle, degrees=True)
    t = center - r.apply(center, inverse=True)
    transform = np.eye(matrix_size)
    transform[:3, :3] = np.transpose(r.as_matrix())
    transform[:3, -1] += t
    return transform

create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0))

Source code in src\util.py
def create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0)):
    transform = cv.getRotationMatrix2D(center[:2], angle, scale)
    transform[:, 2] += translate
    if len(transform) == 2:
        transform = np.vstack([transform, [0, 0, 1]])   # create 3x3 matrix
    return transform

desc_to_dict(desc)

Source code in src\util.py
def desc_to_dict(desc: str) -> dict:
    desc_dict = {}
    if desc.startswith('{'):
        try:
            metadata = ast.literal_eval(desc)
            return metadata
        except:
            pass
    for item in re.split(r'[\r\n\t|]', desc):
        item_sep = '='
        if ':' in item:
            item_sep = ':'
        if item_sep in item:
            items = item.split(item_sep)
            key = items[0].strip()
            value = items[1].strip()
            for dtype in (int, float, bool):
                try:
                    value = dtype(value)
                    break
                except:
                    pass
            desc_dict[key] = value
    return desc_dict

detect_area_points(image)

Source code in src\image\util.py
def detect_area_points(image):
    method = cv.THRESH_OTSU
    threshold = -5
    contours = []
    while len(contours) <= 1 and threshold <= 255:
        _, binimage = cv.threshold(np.array(uint8_image(image)), threshold, 255, method)
        contours0 = cv.findContours(binimage, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        contours = contours0[0] if len(contours0) == 2 else contours0[1]
        method = cv.THRESH_BINARY
        threshold += 5
    area_contours = [(contour, cv.contourArea(contour)) for contour in contours]
    area_contours.sort(key=lambda contour_area: contour_area[1], reverse=True)
    min_area = max(np.mean([area for contour, area in area_contours]), 1)
    area_points = [(get_center(contour), area) for contour, area in area_contours if area > min_area]

    #image = cv.cvtColor(image, cv.COLOR_GRAY2BGR)
    #for point in area_points:
    #    radius = int(np.round(np.sqrt(point[1]/np.pi)))
    #    cv.circle(image, tuple(np.round(point[0]).astype(int)), radius, (255, 0, 0), -1)
    #show_image(image)
    return area_points

dict_to_xyz(dct, keys='xyz')

Source code in src\util.py
def dict_to_xyz(dct, keys='xyz'):
    return [dct[key] for key in keys if key in dct]

dir_regex(pattern)

Source code in src\util.py
def dir_regex(pattern):
    files = []
    for pattern_item in ensure_list(pattern):
        files.extend(glob.glob(pattern_item, recursive=True))
    files_sorted = sorted(files, key=lambda file: find_all_numbers(get_filetitle(file)))
    return files_sorted

draw_edge_filter(bounds)

Source code in src\util.py
def draw_edge_filter(bounds):
    out_image = np.zeros(np.flip(bounds))
    y, x = np.where(out_image == 0)
    points = np.transpose([x, y])

    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) * 10, 0, 1)
    return position_weights.reshape(np.flip(bounds))

draw_keypoints(image, points, color=(255, 0, 0))

Source code in src\image\util.py
def draw_keypoints(image, points, color=(255, 0, 0)):
    out_image = color_image(float2int_image(image))
    for point in points:
        point = np.round(point).astype(int)
        cv.drawMarker(out_image, tuple(point), color=color, markerType=cv.MARKER_CROSS, markerSize=5, thickness=1)
    return out_image

draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None, points_color='black', match_color='red', inlier_color='lime', show_plot=True, output_filename=None)

Source code in src\image\util.py
def draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None,
                           points_color='black', match_color='red', inlier_color='lime',
                           show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape = np.max([image.shape for image in [image1, image2]], axis=0)
    shape_y, shape_x = shape[:2]
    if shape_x > 2 * shape_y:
        merge_axis = 0
        offset2 = [shape_y, 0]
    else:
        merge_axis = 1
        offset2 = [0, shape_x]
    image = np.concatenate([
        np.pad(image1, ((0, shape[0] - image1.shape[0]), (0, shape[1] - image1.shape[1]))),
        np.pad(image2, ((0, shape[0] - image2.shape[0]), (0, shape[1] - image2.shape[1])))
    ], axis=merge_axis)
    ax.imshow(image, cmap='gray')

    ax.scatter(
        points1[:, 1],
        points1[:, 0],
        facecolors='none',
        edgecolors=points_color,
    )
    ax.scatter(
        points2[:, 1] + offset2[1],
        points2[:, 0] + offset2[0],
        facecolors='none',
        edgecolors=points_color,
    )

    for i, match in enumerate(matches):
        color = match_color
        if i < len(inliers) and inliers[i]:
            color = inlier_color
        index1, index2 = match
        ax.plot(
            (points1[index1, 1], points2[index2, 1] + offset2[1]),
            (points1[index1, 0], points2[index2, 0] + offset2[0]),
            '-', linewidth=1, alpha=0.5, color=color,
        )

    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

    return fig, ax

draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None, color=(255, 0, 0), inlier_color=(0, 255, 0), radius=15, thickness=2)

Source code in src\image\util.py
def draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None,
                              color=(255, 0, 0), inlier_color=(0, 255, 0), radius=15, thickness=2):
    # based on https://gist.github.com/woolpeeker/d7e1821e1b5c556b32aafe10b7a1b7e8
    image1 = uint8_image(image1)
    image2 = uint8_image(image2)
    # We're drawing them side by side.  Get dimensions accordingly.
    new_shape = (max(image1.shape[0], image2.shape[0]), image1.shape[1] + image2.shape[1], 3)
    out_image = np.zeros(new_shape, image1.dtype)
    # Place images onto the new image.
    out_image[0:image1.shape[0], 0:image1.shape[1]] = color_image(image1)
    out_image[0:image2.shape[0], image1.shape[1]:image1.shape[1] + image2.shape[1]] = color_image(image2)

    if matches is not None:
        # Draw lines between matches.  Make sure to offset kp coords in second image appropriately.
        for index, match in enumerate(matches):
            if inliers is not None and inliers[index]:
                line_color = inlier_color
            else:
                line_color = color
            # So the keypoint locs are stored as a tuple of floats.  cv2.line() wants locs as a tuple of ints.
            end1 = tuple(np.round(points1[match[0]]).astype(int))
            end2 = tuple(np.round(points2[match[1]]).astype(int) + np.array([image1.shape[1], 0]))
            cv.line(out_image, end1, end2, line_color, thickness)
            cv.circle(out_image, end1, radius, line_color, thickness)
            cv.circle(out_image, end2, radius, line_color, thickness)
    else:
        # Draw all points if no matches are provided.
        for point in points1:
            point = tuple(np.round(point).astype(int))
            cv.circle(out_image, point, radius, color, thickness)
        for point in points2:
            point = tuple(np.round(point).astype(int) + np.array([image1.shape[1], 0]))
            cv.circle(out_image, point, radius, color, thickness)
    return out_image

draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None, show_plot=True, output_filename=None)

Source code in src\image\util.py
def draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None,
                              show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape_y, shape_x = image1.shape[:2]
    if shape_x > 2 * shape_y:
        alignment = 'vertical'
    else:
        alignment = 'horizontal'
    plot_matched_features(
        image1,
        image2,
        keypoints0=points1,
        keypoints1=points2,
        matches=matches,
        ax=ax,
        alignment=alignment,
        only_matches=True,
    )
    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

ensure_list(x)

Source code in src\util.py
def ensure_list(x) -> list:
    if x is None:
        return []
    elif isinstance(x, list):
        return x
    else:
        return [x]
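
Example:

ensure_list(None)    # -> []
ensure_list('a')     # -> ['a']
ensure_list(['a'])   # -> ['a']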

ensure_unsigned_image(image)

Source code in src\image\util.py
def ensure_unsigned_image(image: np.ndarray) -> np.ndarray:
    source_dtype = image.dtype
    dtype = ensure_unsigned_type(source_dtype)
    if dtype != source_dtype:
        # conversion without overhead
        offset = 2 ** (8 * dtype.itemsize - 1)
        new_image = image.astype(dtype) + offset
    else:
        new_image = image
    return new_image

ensure_unsigned_type(dtype)

Source code in src\image\util.py
def ensure_unsigned_type(dtype: np.dtype) -> np.dtype:
    new_dtype = dtype
    if dtype.kind == 'i' or dtype.byteorder == '>' or dtype.byteorder == '<':
        new_dtype = np.dtype(f'u{dtype.itemsize}')
    return new_dtype

eval_context(data, key, default_value, context)

Source code in src\util.py
def eval_context(data, key, default_value, context):
    value = data.get(key, default_value)
    if isinstance(value, str):
        try:
            value = value.format_map(context)
        except:
            pass
        try:
            value = eval(value, context)
        except:
            pass
    return value

export_csv(filename, data, header=None)

Source code in src\util.py
def export_csv(filename, data, header=None):
    with open(filename, 'w', encoding='utf8', newline='') as file:
        csvwriter = csv.writer(file)
        if header is not None:
            csvwriter.writerow(header)
        for row in data:
            csvwriter.writerow(row)

export_json(filename, data)

Source code in src\util.py
def export_json(filename, data):
    with open(filename, 'w', encoding='utf8') as file:
        json.dump(data, file, indent=4)

filter_dict(dict0)

Source code in src\util.py
def filter_dict(dict0: dict) -> dict:
    new_dict = {}
    for key, value0 in dict0.items():
        if value0 is not None:
            values = []
            for value in ensure_list(value0):
                if isinstance(value, dict):
                    value = filter_dict(value)
                values.append(value)
            if len(values) == 1:
                values = values[0]
            new_dict[key] = values
    return new_dict

filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5)

Source code in src\util.py
def filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5):
    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) / filter_factor, 0, 1)
    order_weights = 1 - np.array(range(len(points))) / len(points) / 2
    weights = position_weights * order_weights
    return weights > threshold

filter_noise_images(images)

Source code in src\image\util.py
def filter_noise_images(images):
    dtype = images[0].dtype
    maxval = 2 ** (8 * dtype.itemsize) - 1
    image_vars = [np.asarray(np.std(image)).item() for image in images]
    threshold, mask0 = cv.threshold(np.array(image_vars).astype(dtype), 0, maxval, cv.THRESH_OTSU)
    mask = [flag.item() for flag in mask0.astype(bool)]
    return int(threshold), mask

find_all_numbers(text)

Source code in src\util.py
def find_all_numbers(text: str) -> list:
    return list(map(int, re.findall(r'\d+', text)))
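
Example (hypothetical filename):

find_all_numbers('tile_003_z12.ome.tiff')   # -> [3, 12]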

float2int_image(image, target_dtype=np.dtype(np.uint8))

Source code in src\image\util.py
def float2int_image(image, target_dtype=np.dtype(np.uint8)):
    source_dtype = image.dtype
    if source_dtype.kind not in ('i', 'u') and not target_dtype.kind == 'f':
        maxval = 2 ** (8 * target_dtype.itemsize) - 1
        return (image * maxval).astype(target_dtype)
    else:
        return image

get_center(data, offset=(0, 0))

Source code in src\util.py
def get_center(data, offset=(0, 0)):
    moments = get_moments(data, offset=offset)
    if moments['m00'] != 0:
        center = get_moments_center(moments)
    else:
        center = np.mean(data, 0).flatten()  # close approximation
    return center.astype(np.float32)

get_center_from_transform(transform)

Source code in src\util.py
def get_center_from_transform(transform):
    # from opencv:
    # t0 = (1-alpha) * cx - beta * cy
    # t1 = beta * cx + (1-alpha) * cy
    # where
    # alpha = cos(angle) * scale
    # beta = sin(angle) * scale
    # isolate cx and cy:
    t0, t1 = transform[:2, 2]
    scale = 1
    angle = np.arctan2(transform[0][1], transform[0][0])
    alpha = np.cos(angle) * scale
    beta = np.sin(angle) * scale
    cx = (t1 + t0 * (1 - alpha) / beta) / (beta + (1 - alpha) ** 2 / beta)
    cy = ((1 - alpha) * cx - t0) / beta
    return cx, cy

get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None)

Source code in src\image\util.py
def get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None):
    if rotation is None:
        rotation = 0

    if isinstance(data, DataTree):
        sim = msi_utils.get_sim_from_msim(data)
    else:
        sim = data
    sdims = ''.join(si_utils.get_spatial_dims_from_sim(sim))
    sdims = sdims.replace('zyx', 'xyz').replace('yx', 'xy')   # order xy(z)
    origin = si_utils.get_origin_from_sim(sim)
    translation = [origin[sdim] for sdim in sdims]

    if len(translation) == 0:
        translation = [0, 0]
    if len(translation) == 2:
        if translation0 is not None and len(translation0) == 3:
            z = translation0[2]
        else:
            z = 0
        translation = list(translation) + [z]

    if transform is not None:
        translation1, rotation1, _ = get_properties_from_transform(transform, invert=True)
        translation = np.array(translation) + translation1
        rotation += rotation1

    if transform_key is not None:
        transform1 = sim.transforms[transform_key]
        translation1, rotation1, _ = get_properties_from_transform(transform1, invert=True)
        rotation += rotation1

    return translation, rotation

get_default(x, default)

Source code in src\util.py
def get_default(x, default):
    return default if x is None else x

get_filetitle(filename)

Source code in src\util.py
def get_filetitle(filename: str) -> str:
    filebase = os.path.basename(filename)
    title = os.path.splitext(filebase)[0].rstrip('.ome')
    return title
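
Example (hypothetical path). Note that rstrip('.ome') removes trailing characters from the set {'.', 'o', 'm', 'e'}, so titles that themselves end in those letters can be stripped further than the literal '.ome' suffix.

get_filetitle(r'data\tiles\scan1.ome.tiff')   # -> 'scan1'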

get_image_quantile(image, quantile, axis=None)

Source code in src\image\util.py
def get_image_quantile(image: np.ndarray, quantile: float, axis=None) -> float:
    value = np.quantile(image, quantile, axis=axis).astype(image.dtype)
    return value

get_image_size_info(sizes_xyzct, pixel_nbytes, pixel_type, channels)

Source code in src\image\util.py
def get_image_size_info(sizes_xyzct: list, pixel_nbytes: int, pixel_type: np.dtype, channels: list) -> str:
    image_size_info = 'XYZCT:'
    size = 0
    for i, size_xyzct in enumerate(sizes_xyzct):
        w, h, zs, cs, ts = size_xyzct
        size += np.int64(pixel_nbytes) * w * h * zs * cs * ts
        if i > 0:
            image_size_info += ','
        image_size_info += f' {w} {h} {zs} {cs} {ts}'
    image_size_info += f' Pixel type: {pixel_type} Uncompressed: {print_hbytes(size)}'
    if sizes_xyzct[0][3] == 3:
        channel_info = 'rgb'
    else:
        channel_info = ','.join([channel.get('Name', '') for channel in channels])
    if channel_info != '':
        image_size_info += f' Channels: {channel_info}'
    return image_size_info

get_image_window(image, low=0.01, high=0.99)

Source code in src\image\util.py
def get_image_window(image, low=0.01, high=0.99):
    window = (
        get_image_quantile(image, low),
        get_image_quantile(image, high)
    )
    return window

get_max_downsamples(shape, npyramid_add, pyramid_downsample)

Source code in src\image\util.py
def get_max_downsamples(shape, npyramid_add, pyramid_downsample):
    shape = list(shape)
    for i in range(npyramid_add):
        shape[-1] //= pyramid_downsample
        shape[-2] //= pyramid_downsample
        if shape[-1] < 1 or shape[-2] < 1:
            return i
    return npyramid_add
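
Example (made-up shape): returns how many of the requested levels fit before the last two dimensions would shrink below one pixel.

get_max_downsamples((1, 3, 100, 100), npyramid_add=8, pyramid_downsample=2)   # -> 6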

get_mean_nn_distance(points1, points2)

Source code in src\util.py
def get_mean_nn_distance(points1, points2):
    return np.mean([get_nn_distance(points1), get_nn_distance(points2)])

get_moments(data, offset=(0, 0))

Source code in src\util.py
def get_moments(data, offset=(0, 0)):
    moments = cv.moments((np.array(data) + offset).astype(np.float32))    # doesn't work for float64!
    return moments

get_moments_center(moments, offset=(0, 0))

Source code in src\util.py
def get_moments_center(moments, offset=(0, 0)):
    return np.array([moments['m10'], moments['m01']]) / moments['m00'] + np.array(offset)

get_nn_distance(points0)

Source code in src\util.py
def get_nn_distance(points0):
    points = list(set(map(tuple, points0)))     # get unique points
    if len(points) >= 2:
        tree = KDTree(points, leaf_size=2)
        dist, ind = tree.query(points, k=2)
        nn_distance = np.median(dist[:, 1])
    else:
        nn_distance = 1
    return nn_distance

get_numpy_slicing(dimension_order, **slicing)

Source code in src\image\util.py
def get_numpy_slicing(dimension_order, **slicing):
    slices = []
    for axis in dimension_order:
        index = slicing.get(axis)
        index0 = slicing.get(axis + '0')
        index1 = slicing.get(axis + '1')
        if index0 is not None and index1 is not None:
            slice1 = slice(int(index0), int(index1))
        elif index is not None:
            slice1 = int(index)
        else:
            slice1 = slice(None)
        slices.append(slice1)
    return tuple(slices)
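
Example (hypothetical 'tczyx' array; a plain axis keyword selects an index, the axis0/axis1 keywords select a range):

slices = get_numpy_slicing('tczyx', t=0, c=1, z0=2, z1=5)
# -> (0, 1, slice(2, 5), slice(None), slice(None)); usable as data[slices]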

get_orthogonal_pairs(origins, image_size_um)

Get pairs of orthogonal neighbors from a list of tiles. Tiles don't have to be placed on a regular grid.

Source code in src\util.py
def get_orthogonal_pairs(origins, image_size_um):
    """
    Get pairs of orthogonal neighbors from a list of tiles.
    Tiles don't have to be placed on a regular grid.
    """
    pairs = []
    angles = []
    z_positions = [pos[0] for pos in origins if len(pos) == 3]
    ordered_z = sorted(set(z_positions))
    is_mixed_3dstack = len(ordered_z) < len(z_positions)
    for i, j in np.transpose(np.triu_indices(len(origins), 1)):
        origini = np.array(origins[i])
        originj = np.array(origins[j])
        if is_mixed_3dstack:
            # ignore z value for distance
            distance = math.dist(origini[-2:], originj[-2:])
            min_distance = max(image_size_um[-2:])
            z_i, z_j = origini[0], originj[0]
            is_same_z = (z_i == z_j)
            is_close_z = abs(ordered_z.index(z_i) - ordered_z.index(z_j)) <= 1
            if not is_same_z:
                # for tiles in different z stack, require greater overlap
                min_distance *= 0.8
            ok = (distance < min_distance and is_close_z)
        else:
            distance = math.dist(origini, originj)
            min_distance = max(image_size_um)
            ok = (distance < min_distance)
        if ok:
            pairs.append((i, j))
            vector = origini - originj
            angle = math.degrees(math.atan2(vector[1], vector[0]))
            if distance < min(image_size_um):
                angle += 90
            while angle < -90:
                angle += 180
            while angle > 90:
                angle -= 180
            angles.append(angle)
    return pairs, angles
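
Example (made-up 2D tile origins in micrometers; tiles further apart than one field of view are not paired):

origins = [(0, 0), (90, 0), (0, 95)]
pairs, angles = get_orthogonal_pairs(origins, image_size_um=(100, 100))
# pairs -> [(0, 1), (0, 2)]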

get_properties_from_transform(transform, invert=False)

Source code in src\image\util.py
def get_properties_from_transform(transform, invert=False):
    if len(transform.shape) == 3:
        transform = transform[0]
    if invert:
        transform = param_utils.invert_coordinate_order(transform)
    transform = np.array(transform)
    translation = param_utils.translation_from_affine(transform)
    if len(translation) == 2:
        translation = list(translation) + [0]
    rotation = get_rotation_from_transform(transform)
    scale = get_scale_from_transform(transform)
    return translation, rotation, scale

get_rotation_from_transform(transform)

Source code in src\util.py
def get_rotation_from_transform(transform):
    rotation = np.rad2deg(np.arctan2(transform[0][1], transform[0][0]))
    return rotation

get_scale_from_transform(transform)

Source code in src\util.py
def get_scale_from_transform(transform):
    scale = np.mean(np.linalg.norm(transform, axis=0)[:-1])
    return scale

get_sim_physical_size(sim, invert=False)

Source code in src\image\util.py
def get_sim_physical_size(sim, invert=False):
    size = si_utils.get_shape_from_sim(sim, asarray=True) * si_utils.get_spacing_from_sim(sim, asarray=True)
    if invert:
        size = np.flip(size)
    return size

get_sim_position_final(sim)

Source code in src\image\util.py
def get_sim_position_final(sim):
    transform_keys = si_utils.get_tranform_keys_from_sim(sim)
    transform = combine_transforms([np.array(si_utils.get_affine_from_sim(sim, transform_key))
                                    for transform_key in transform_keys])
    position = apply_transform([si_utils.get_origin_from_sim(sim, asarray=True)], transform)[0]
    return position

get_sim_shape_2d(sim, transform_key=None)

Source code in src\image\util.py
def get_sim_shape_2d(sim, transform_key=None):
    if 't' in sim.coords.xindexes:
        # work-around for points error in get_overlap_bboxes()
        sim1 = si_utils.sim_sel_coords(sim, {'t': 0})
    else:
        sim1 = sim
    stack_props = si_utils.get_stack_properties_from_sim(sim1, transform_key=transform_key)
    vertices = mv_graph.get_vertices_from_stack_props(stack_props)
    if vertices.shape[1] == 3:
        # remove z coordinate
        vertices = vertices[:, 1:]
    if len(vertices) >= 8:
        # remove redundant x/y vertices
        vertices = vertices[:4]
    if len(vertices) >= 4:
        # last 2 vertices appear to be swapped
        vertices[2:] = np.array(list(reversed(vertices[2:])))
    return vertices

get_translation_from_transform(transform)

Source code in src\util.py
def get_translation_from_transform(transform):
    ndim = len(transform) - 1
    #translation = transform[:ndim, ndim]
    translation = apply_transform([[0] * ndim], transform)[0]
    return translation

get_unique_file_labels(filenames)

Source code in src\util.py
def get_unique_file_labels(filenames: list) -> list:
    file_labels = []
    file_parts = []
    label_indices = set()
    last_parts = None
    for filename in filenames:
        parts = split_numeric(filename)
        if len(parts) == 0:
            parts = filename
        file_parts.append(parts)
        if last_parts is not None:
            for parti, (part1, part2) in enumerate(zip(last_parts, parts)):
                if part1 != part2:
                    label_indices.add(parti)
        last_parts = parts
    label_indices = sorted(list(label_indices))

    for file_part in file_parts:
        file_label = '_'.join([file_part[i] for i in label_indices])
        file_labels.append(file_label)

    if len(set(file_labels)) < len(file_labels):
        # fallback for duplicate labels
        file_labels = [get_filetitle(filename) for filename in filenames]

    return file_labels

get_value_units_micrometer(value_units0)

Source code in src\util.py
def get_value_units_micrometer(value_units0: list|dict) -> list|dict|None:
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    if value_units0 is None:
        return None

    if isinstance(value_units0, dict):
        values_um = {}
        for dim, value_unit in value_units0.items():
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um[dim] = value_um
    else:
        values_um = []
        for value_unit in value_units0:
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um.append(value_um)
    return values_um

grayscale_image(image)

Source code in src\image\util.py
def grayscale_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 4:
        return cv.cvtColor(image, cv.COLOR_RGBA2GRAY)
    elif nchannels > 1:
        return cv.cvtColor(image, cv.COLOR_RGB2GRAY)
    else:
        return image

group_sims_by_z(sims)

Source code in src\image\util.py
def group_sims_by_z(sims):
    grouped_sims = []
    z_positions = [si_utils.get_origin_from_sim(sim).get('z') for sim in sims]
    is_mixed_3dstack = len(set(z_positions)) < len(z_positions)
    if is_mixed_3dstack:
        sims_by_z = {}
        for simi, z_pos in enumerate(z_positions):
            if z_pos is not None and z_pos not in sims_by_z:
                sims_by_z[z_pos] = []
            sims_by_z[z_pos].append(simi)
        grouped_sims = list(sims_by_z.values())
    if len(grouped_sims) == 0:
        grouped_sims = [list(range(len(sims)))]
    return grouped_sims

image_reshape(image, target_size)

Source code in src\image\util.py
def image_reshape(image: np.ndarray, target_size: tuple) -> np.ndarray:
    tw, th = target_size
    sh, sw = image.shape[0:2]
    if sw < tw or sh < th:
        dw = max(tw - sw, 0)
        dh = max(th - sh, 0)
        padding = [(dh // 2, dh - dh // 2), (dw // 2, dw - dw // 2)]
        if len(image.shape) == 3:
            padding += [(0, 0)]
        image = np.pad(image, padding, mode='constant', constant_values=(0, 0))
    if tw < sw or th < sh:
        image = image[0:th, 0:tw]
    return image
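
Example (illustrative sizes; padding is centered, cropping keeps the top-left corner):

import numpy as np
image = np.zeros((80, 100), dtype=np.uint8)   # (height, width)
image_reshape(image, (128, 128)).shape        # -> (128, 128), zero-padded
image_reshape(image, (50, 50)).shape          # -> (50, 50), cropped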

image_resize(image, target_size0, dimension_order='yxc')

Source code in src\image\util.py
def image_resize(image: np.ndarray, target_size0: tuple, dimension_order: str = 'yxc') -> np.ndarray:
    shape = image.shape
    x_index = dimension_order.index('x')
    y_index = dimension_order.index('y')
    c_is_at_end = ('c' in dimension_order and dimension_order.endswith('c'))
    size = shape[x_index], shape[y_index]
    if np.mean(np.divide(size, target_size0)) < 1:
        interpolation = cv.INTER_CUBIC
    else:
        interpolation = cv.INTER_AREA
    dtype0 = image.dtype
    image = ensure_unsigned_image(image)
    target_size = tuple(np.maximum(np.round(target_size0).astype(int), 1))
    if dimension_order in ['yxc', 'yx']:
        new_image = cv.resize(np.asarray(image), target_size, interpolation=interpolation)
    elif dimension_order == 'cyx':
        new_image = np.moveaxis(image, 0, -1)
        new_image = cv.resize(np.asarray(new_image), target_size, interpolation=interpolation)
        new_image = np.moveaxis(new_image, -1, 0)
    else:
        ts = image.shape[dimension_order.index('t')] if 't' in dimension_order else 1
        zs = image.shape[dimension_order.index('z')] if 'z' in dimension_order else 1
        target_shape = list(image.shape).copy()
        target_shape[x_index] = target_size[0]
        target_shape[y_index] = target_size[1]
        new_image = np.zeros(target_shape, dtype=image.dtype)
        for t in range(ts):
            for z in range(zs):
                slices = get_numpy_slicing(dimension_order, z=z, t=t)
                image1 = image[slices]
                if not c_is_at_end:
                    image1 = np.moveaxis(image1, 0, -1)
                new_image1 = np.atleast_3d(cv.resize(np.asarray(image1), target_size, interpolation=interpolation))
                if not c_is_at_end:
                    new_image1 = np.moveaxis(new_image1, -1, 0)
                new_image[slices] = new_image1
    new_image = convert_image_sign_type(new_image, dtype0)
    return new_image

import_csv(filename)

Source code in src\util.py
def import_csv(filename):
    with open(filename, encoding='utf8') as file:
        data = csv.reader(file)
    return data

import_json(filename)

Source code in src\util.py
def import_json(filename):
    with open(filename, encoding='utf8') as file:
        data = json.load(file)
    return data

import_metadata(content, fields=None, input_path=None)

Source code in src\util.py
def import_metadata(content, fields=None, input_path=None):
    # return dict[id] = {values}
    if isinstance(content, str):
        ext = os.path.splitext(content)[1].lower()
        if input_path:
            if isinstance(input_path, list):
                input_path = input_path[0]
            content = os.path.normpath(os.path.join(os.path.dirname(input_path), content))
        if ext == '.csv':
            content = import_csv(content)
        elif ext in ['.json', '.ome.json']:
            content = import_json(content)
    if fields is not None:
        content = [[data[field] for field in fields] for data in content]
    return content

int2float_image(image)

Source code in src\image\util.py
def int2float_image(image):
    source_dtype = image.dtype
    if not source_dtype.kind == 'f':
        maxval = 2 ** (8 * source_dtype.itemsize) - 1
        return image / np.float32(maxval)
    else:
        return image

norm_image_quantiles(image0, quantile=0.99)

Source code in src\image\util.py
def norm_image_quantiles(image0, quantile=0.99):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    min_value = np.quantile(image, 1 - quantile)
    max_value = np.quantile(image, quantile)
    normimage = (image - np.mean(image)) / (max_value - min_value)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

norm_image_variance(image0)

Source code in src\image\util.py
def norm_image_variance(image0):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    normimage = (image - np.mean(image)) / np.std(image)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

normalise(sims, transform_key, use_global=True)

Source code in src\image\util.py
def normalise(sims, transform_key, use_global=True):
    new_sims = []
    dtype = sims[0].dtype
    # global mean and stddev
    if use_global:
        mins = []
        ranges = []
        for sim in sims:
            min = np.mean(sim, dtype=np.float32)
            range = np.std(sim, dtype=np.float32)
            #min, max = get_image_window(sim, low=0.01, high=0.99)
            #range = max - min
            mins.append(min)
            ranges.append(range)
        min = np.mean(mins)
        range = np.mean(ranges)
    else:
        min = 0
        range = 1
    # normalise all images
    for sim in sims:
        if not use_global:
            min = np.mean(sim, dtype=np.float32)
            range = np.std(sim, dtype=np.float32)
        image = (sim - min) / range
        image = float2int_image(image.clip(0, 1), dtype)    # np.clip(image) is not dask-compatible, use image.clip() instead
        new_sim = si_utils.get_sim_from_array(
            image,
            dims=sim.dims,
            scale=si_utils.get_spacing_from_sim(sim),
            translation=si_utils.get_origin_from_sim(sim),
            transform_key=transform_key,
            affine=si_utils.get_affine_from_sim(sim, transform_key),
            c_coords=sim.c.data,
            t_coords=sim.t.data
        )
        new_sims.append(new_sim)
    return new_sims

normalise_rotated_positions(positions0, rotations0, size, center)

Source code in src\util.py
def normalise_rotated_positions(positions0, rotations0, size, center):
    # in [xy(z)]
    positions = []
    rotations = []
    _, angles = get_orthogonal_pairs(positions0, size)
    for position0, rotation in zip(positions0, rotations0):
        if rotation is None and len(angles) > 0:
            rotation = -np.mean(angles)
        angle = -rotation if rotation is not None else None
        transform = create_transform(center=center, angle=angle, matrix_size=4)
        position = apply_transform([position0], transform)[0]
        positions.append(position)
        rotations.append(rotation)
    return positions, rotations

normalise_rotation(rotation)

Normalise rotation to be in the range [-180, 180].

Source code in src\util.py
def normalise_rotation(rotation):
    """
    Normalise rotation to be in the range [-180, 180].
    """
    while rotation < -180:
        rotation += 360
    while rotation > 180:
        rotation -= 360
    return rotation
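
Example:

normalise_rotation(270)    # -> -90
normalise_rotation(-200)   # -> 160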

normalise_values(image, min_value, max_value)

Source code in src\image\util.py
def normalise_values(image: np.ndarray, min_value: float, max_value: float) -> np.ndarray:
    image = (image.astype(np.float32) - min_value) / (max_value - min_value)
    return image.clip(0, 1)

pilmode_to_pixelinfo(mode)

Source code in src\image\util.py
def pilmode_to_pixelinfo(mode: str) -> tuple:
    pixelinfo = (np.uint8, 8, 1)
    mode_types = {
        'I': (np.uint32, 32, 1),
        'F': (np.float32, 32, 1),
        'RGB': (np.uint8, 24, 3),
        'RGBA': (np.uint8, 32, 4),
        'CMYK': (np.uint8, 32, 4),
        'YCbCr': (np.uint8, 24, 3),
        'LAB': (np.uint8, 24, 3),
        'HSV': (np.uint8, 24, 3),
    }
    if '16' in mode:
        pixelinfo = (np.uint16, 16, 1)
    elif '32' in mode:
        pixelinfo = (np.uint32, 32, 1)
    elif mode in mode_types:
        pixelinfo = mode_types[mode]
    pixelinfo = (np.dtype(pixelinfo[0]), pixelinfo[1])
    return pixelinfo

points_to_3d(points)

Source code in src\util.py
def points_to_3d(points):
    return [list(point) + [0] for point in points]

precise_resize(image, factors)

Source code in src\image\util.py
def precise_resize(image: np.ndarray, factors) -> np.ndarray:
    if image.ndim > len(factors):
        factors = list(factors) + [1]
    new_image = downscale_local_mean(np.asarray(image), tuple(factors)).astype(image.dtype)
    return new_image

print_dict(dct, indent=0)

Source code in src\util.py
def print_dict(dct: dict, indent: int = 0) -> str:
    s = ''
    if isinstance(dct, dict):
        for key, value in dct.items():
            s += '\n'
            if not isinstance(value, list):
                s += '\t' * indent + str(key) + ': '
            if isinstance(value, dict):
                s += print_dict(value, indent=indent + 1)
            elif isinstance(value, list):
                for v in value:
                    s += print_dict(v)
            else:
                s += str(value)
    else:
        s += str(dct)
    return s

print_hbytes(nbytes)

Source code in src\util.py
def print_hbytes(nbytes: int) -> str:
    exps = ['', 'K', 'M', 'G', 'T', 'P', 'E']
    div = 1024
    exp = 0

    while nbytes > div:
        nbytes /= div
        exp += 1
    if exp < len(exps):
        e = exps[exp]
    else:
        e = f'e{exp * 3}'
    return f'{nbytes:.1f}{e}B'
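
Example:

print_hbytes(1500)          # -> '1.5KB'
print_hbytes(3 * 1024**3)   # -> '3.0GB'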

redimension_data(data, old_order, new_order, **indices)

Source code in src\image\util.py
def redimension_data(data, old_order, new_order, **indices):
    # able to provide optional dimension values e.g. t=0, z=0
    if new_order == old_order:
        return data

    new_data = data
    order = old_order
    # remove
    for o in old_order:
        if o not in new_order:
            index = order.index(o)
            dim_value = indices.get(o, 0)
            new_data = np.take(new_data, indices=dim_value, axis=index)
            order = order[:index] + order[index + 1:]
    # add
    for o in new_order:
        if o not in order:
            new_data = np.expand_dims(new_data, 0)
            order = o + order
    # move
    old_indices = [order.index(o) for o in new_order]
    new_indices = list(range(len(new_order)))
    new_data = np.moveaxis(new_data, old_indices, new_indices)
    return new_data
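
Example (hypothetical 'cyx' array):

import numpy as np
data = np.zeros((3, 512, 512))
redimension_data(data, 'cyx', 'tczyx').shape    # -> (1, 3, 1, 512, 512)
redimension_data(data, 'cyx', 'yx', c=0).shape  # -> (512, 512)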

reorder(items, old_order, new_order, default_value=0)

Source code in src\util.py
def reorder(items: list, old_order: str, new_order: str, default_value: int = 0) -> list:
    new_items = []
    for label in new_order:
        if label in old_order:
            item = items[old_order.index(label)]
        else:
            item = default_value
        new_items.append(item)
    return new_items
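
Example (reordering shape values between dimension orders):

reorder([80, 100, 3], 'yxc', 'cyx')   # -> [3, 80, 100]
reorder([80, 100], 'yx', 'zyx')       # -> [0, 80, 100] (missing 'z' gets the default)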

resize_image(image, new_size)

Source code in src\image\util.py
def resize_image(image, new_size):
    if not isinstance(new_size, (tuple, list, np.ndarray)):
        # use single value for width; apply aspect ratio
        size = np.flip(image.shape[:2])
        new_size = new_size, new_size * size[1] // size[0]
    return cv.resize(image, new_size)

retuple(chunks, shape)

Expand chunks to match shape.

E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028) return (3, 4, 5, 64, 64)

If chunks is an integer, it is applied to all dimensions, to match the behaviour of zarr-python.

Source code in src\util.py
def retuple(chunks, shape):
    # from ome-zarr-py
    """
    Expand chunks to match shape.

    E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028)
    return (3, 4, 5, 64, 64)

    If chunks is an integer, it is applied to all dimensions, to match
    the behaviour of zarr-python.
    """

    if isinstance(chunks, int):
        return tuple([chunks] * len(shape))

    dims_to_add = len(shape) - len(chunks)
    return *shape[:dims_to_add], *chunks
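
Example:

retuple((64, 64), (3, 4, 5, 1028, 1028))   # -> (3, 4, 5, 64, 64)
retuple(32, (2, 100, 100))                 # -> (32, 32, 32)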

round_significants(a, significant_digits)

Source code in src\util.py
def round_significants(a: float, significant_digits: int) -> float:
    if a != 0:
        round_decimals = significant_digits - int(np.floor(np.log10(abs(a)))) - 1
        return round(a, round_decimals)
    return a

show_image(image, title='', cmap=None)

Source code in src\image\util.py
def show_image(image, title='', cmap=None):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if cmap is None:
        cmap = 'gray' if nchannels == 1 else None
    plt.imshow(image, cmap=cmap)
    if title != '':
        plt.title(title)
    plt.show()

split_num_text(text)

Source code in src\util.py
def split_num_text(text: str) -> list:
    num_texts = []
    block = ''
    is_num0 = None
    if text is None:
        return None

    for c in text:
        is_num = (c.isnumeric() or c == '.')
        if is_num0 is not None and is_num != is_num0:
            num_texts.append(block)
            block = ''
        block += c
        is_num0 = is_num
    if block != '':
        num_texts.append(block)

    num_texts2 = []
    for block in num_texts:
        block = block.strip()
        try:
            block = float(block)
        except:
            pass
        if block not in [' ', ',', '|']:
            num_texts2.append(block)
    return num_texts2

split_numeric(text)

Source code in src\util.py
def split_numeric(text: str) -> list:
    num_parts = []
    parts = re.split(r'[_/\\.]', text)
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            num_parts.append(part)
    return num_parts

split_numeric_dict(text)

Source code in src\util.py
def split_numeric_dict(text: str) -> dict:
    num_parts = {}
    parts = re.split(r'[_/\\.]', text)
    parti = 0
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            index = num_span.start()
            label = part[:index]
            if label == '':
                label = parti
            num_parts[label] = num_span.group()
            parti += 1
    return num_parts

split_path(path)

Source code in src\util.py
def split_path(path: str) -> list:
    return os.path.normpath(path).split(os.path.sep)

split_value_unit_list(text)

Source code in src\util.py
def split_value_unit_list(text: str) -> list:
    value_units = []
    if text is None:
        return None

    items = split_num_text(text)
    if isinstance(items[-1], str):
        def_unit = items[-1]
    else:
        def_unit = ''

    i = 0
    while i < len(items):
        value = items[i]
        if i + 1 < len(items):
            unit = items[i + 1]
        else:
            unit = ''
        if not isinstance(value, str):
            if isinstance(unit, str):
                i += 1
            else:
                unit = def_unit
            value_units.append((value, unit))
        i += 1
    return value_units
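
Example (illustrative strings; a trailing unit acts as the default unit):

split_value_unit_list('0.1 um')   # -> [(0.1, 'um')]
split_value_unit_list('5mm')      # -> [(5.0, 'mm')]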

uint8_image(image)

Source code in src\image\util.py
def uint8_image(image):
    source_dtype = image.dtype
    if source_dtype.kind == 'f':
        image = image * 255
    elif source_dtype.itemsize != 1:
        factor = 2 ** (8 * (source_dtype.itemsize - 1))
        image = image // factor
    return image.astype(np.uint8)

validate_transform(transform, max_rotation=None)

Source code in src\util.py
def validate_transform(transform, max_rotation=None):
    if transform is None:
        return False
    transform = np.array(transform)
    if np.any(np.isnan(transform)):
        return False
    if np.any(np.isinf(transform)):
        return False
    if np.linalg.det(transform) == 0:
        return False
    if max_rotation is not None and abs(normalise_rotation(get_rotation_from_transform(transform))) > max_rotation:
        return False
    return True

xyz_to_dict(xyz, axes='xyz')

Source code in src\util.py
def xyz_to_dict(xyz, axes='xyz'):
    dct = {dim: value for dim, value in zip(axes, xyz)}
    return dct
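
Example (round trip with dict_to_xyz):

xyz_to_dict([10, 20, 5])          # -> {'x': 10, 'y': 20, 'z': 5}
dict_to_xyz({'x': 10, 'y': 20})   # -> [10, 20]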

ZarrDaskSource

ZarrDaskSource

Bases: DaskSource

Source code in src\image\ZarrDaskSource.py
class ZarrDaskSource(DaskSource):
    def init_metadata(self):
        location = parse_url(self.filename)
        if location is None:
            raise FileNotFoundError(f'Error parsing ome-zarr file {self.filename}')
        if 'bioformats2raw.layout' in location.root_attrs:
            location = parse_url(os.path.join(self.filename, '0'))
            if location is None:
                raise FileNotFoundError(f'Error parsing ome-zarr file {self.filename}')
        reader = Reader(location)
        nodes = list(reader())
        image_node = nodes[0]
        self.data = image_node.data
        self.metadata = image_node.metadata

        self.shapes = [level.shape for level in self.data]
        self.shape = self.shapes[0]
        self.dtype = self.data[0].dtype
        axes = self.metadata['axes']
        self.dimension_order = ''.join([axis['name'] for axis in axes])
        units = {axis['name']: axis['unit'] for axis in axes if 'unit' in axis}

        pixel_sizes = []
        positions = []
        channels = []
        for transforms in self.metadata.get('coordinateTransformations', []):
            pixel_size = {}
            position = {}
            scale1 = []
            position1 = None
            for transform in transforms:
                if transform['type'] == 'scale':
                    scale1 = transform['scale']
                if transform['type'] == 'translation':
                    position1 = transform.get('translation')
            for index, dim in enumerate(self.dimension_order):
                if dim in 'xyz':
                    pixel_size[dim] = convert_to_um(scale1[index], units.get(dim, ''))
                    if position1 is not None:
                        position[dim] = (position1[index], units.get(dim, ''))
                    else:
                        position[dim] = 0
            pixel_sizes.append(pixel_size)
            positions.append(position)

        colormaps = self.metadata['colormap']
        for channeli, channel0 in enumerate(self.metadata['channel_names']):
            channel = {'label': channel0}
            if channeli < len(colormaps):
                channel['color'] = colormaps[channeli][-1]
            channels.append(channel)
        self.pixel_sizes = pixel_sizes
        self.positions = positions
        self.rotation = 0
        self.channels = channels

    def get_data(self, level=0):
        return self.data[level]
get_data(level=0)
Source code in src\image\ZarrDaskSource.py
def get_data(self, level=0):
    return self.data[level]
init_metadata()
Source code in src\image\ZarrDaskSource.py
def init_metadata(self):
    location = parse_url(self.filename)
    if location is None:
        raise FileNotFoundError(f'Error parsing ome-zarr file {self.filename}')
    if 'bioformats2raw.layout' in location.root_attrs:
        location = parse_url(os.path.join(self.filename, '0'))
        if location is None:
            raise FileNotFoundError(f'Error parsing ome-zarr file {self.filename}')
    reader = Reader(location)
    nodes = list(reader())
    image_node = nodes[0]
    self.data = image_node.data
    self.metadata = image_node.metadata

    self.shapes = [level.shape for level in self.data]
    self.shape = self.shapes[0]
    self.dtype = self.data[0].dtype
    axes = self.metadata['axes']
    self.dimension_order = ''.join([axis['name'] for axis in axes])
    units = {axis['name']: axis['unit'] for axis in axes if 'unit' in axis}

    pixel_sizes = []
    positions = []
    channels = []
    for transforms in self.metadata.get('coordinateTransformations', []):
        pixel_size = {}
        position = {}
        scale1 = []
        position1 = None
        for transform in transforms:
            if transform['type'] == 'scale':
                scale1 = transform['scale']
            if transform['type'] == 'translation':
                position1 = transform.get('translation')
        for index, dim in enumerate(self.dimension_order):
            if dim in 'xyz':
                pixel_size[dim] = convert_to_um(scale1[index], units.get(dim, ''))
                if position1 is not None:
                    position[dim] = (position1[index], units.get(dim, ''))
                else:
                    position[dim] = 0
        pixel_sizes.append(pixel_size)
        positions.append(position)

    colormaps = self.metadata['colormap']
    for channeli, channel0 in enumerate(self.metadata['channel_names']):
        channel = {'label': channel0}
        if channeli < len(colormaps):
            channel['color'] = colormaps[channeli][-1]
        channels.append(channel)
    self.pixel_sizes = pixel_sizes
    self.positions = positions
    self.rotation = 0
    self.channels = channels

color_conversion

hexrgb_to_rgba(hexrgb)

Source code in src\image\color_conversion.py
def hexrgb_to_rgba(hexrgb: str) -> list:
    rgba = int_to_rgba(eval('0x' + hexrgb + 'FF'))
    return rgba

int_to_rgba(intrgba)

Source code in src\image\color_conversion.py
def int_to_rgba(intrgba: int) -> list:
    signed = (intrgba < 0)
    rgba = [x / 255 for x in intrgba.to_bytes(4, signed=signed, byteorder="big")]
    if rgba[-1] == 0:
        rgba[-1] = 1
    return rgba

rgba_to_hexrgb(rgba)

Source code in src\image\color_conversion.py
def rgba_to_hexrgb(rgba: tuple|list) -> str:
    hexrgb = ''.join([hex(int(x * 255))[2:].upper().zfill(2) for x in rgba[:3]])
    return hexrgb

rgba_to_int(rgba)

Source code in src\image\color_conversion.py
def rgba_to_int(rgba: tuple|list) -> int:
    intrgba = int.from_bytes([int(x * 255) for x in rgba], signed=True, byteorder="big")
    return intrgba
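
Example (illustrative round trip; rgba_to_int packs RGBA bytes into a signed 32-bit integer):

rgba = hexrgb_to_rgba('FF8000')      # -> [1.0, ~0.502, 0.0, 1.0]
rgba_to_hexrgb(rgba)                 # -> 'FF8000'
rgba_to_int([1.0, 0.0, 0.0, 1.0])    # -> -16776961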

flatfield

apply_flatfield_correction(sims, transform_key, quantiles, quantile_images)

Source code in src\image\flatfield.py
def apply_flatfield_correction(sims, transform_key, quantiles, quantile_images):
    new_sims = []
    sim0 = sims[0]
    dims0 = sim0.dims
    has_c_dim = 'c' in dims0
    dtype = sim0.dtype
    dark = 0
    bright = 1
    for quantile, quantile_image in zip(quantiles, quantile_images):
        if has_c_dim and dims0.index('c') != -1:
            quantile_image = da.moveaxis(quantile_image, dims0.index('c'), -1)
        if quantile <= 0.5:
            dark = quantile_image
        else:
            bright = quantile_image

    bright_dark_range = bright - dark
    if has_c_dim:
        axes = list(range(len(dims0) - 1))   # all except the final 'c' axis
    else:
        axes = None
    mean_bright_dark = np.array(np.mean(bright - dark, axis=axes))

    for sim in sims:
        if has_c_dim:
            image0 = sim.transpose(..., 'c')
        else:
            image0 = sim
        image = float2int_image(image_flatfield_correction(int2float_image(image0), dark, bright_dark_range, mean_bright_dark), dtype)
        if has_c_dim:
            image = image.transpose(*dims0)     # revert to original order
        new_sim = si_utils.get_sim_from_array(
            image,
            dims=sim.dims,
            scale=si_utils.get_spacing_from_sim(sim),
            translation=si_utils.get_origin_from_sim(sim),
            transform_key=transform_key,
            affine=si_utils.get_affine_from_sim(sim, transform_key),
            c_coords=sim.c
        )
        new_sims.append(new_sim)
    return new_sims

apply_transform(points, transform)

Source code in src\util.py
def apply_transform(points, transform):
    new_points = []
    for point in points:
        point_len = len(point)
        while len(point) < len(transform):
            point = list(point) + [1]
        new_point = np.dot(point, np.transpose(np.array(transform)))
        new_points.append(new_point[:point_len])
    return new_points
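
Example (hypothetical 3x3 translation matrix; points are padded with 1 for the homogeneous coordinate):

import numpy as np
transform = np.array([[1, 0, 5], [0, 1, 3], [0, 0, 1]], dtype=float)
apply_transform([[10, 20]], transform)   # -> [array([15., 23.])]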

blur_image(image, sigma)

Source code in src\image\util.py
def blur_image(image, sigma):
    nchannels = image.shape[2] if image.ndim == 3 else 1
    if nchannels not in [1, 3]:
        new_image = np.zeros_like(image)
        for channeli in range(nchannels):
            new_image[..., channeli] = blur_image_single(image[..., channeli], sigma)
    else:
        new_image = blur_image_single(image, sigma)
    return new_image

blur_image_single(image, sigma)

Source code in src\image\util.py, lines 428-429
def blur_image_single(image, sigma):
    return gaussian_filter(image, sigma)

calc_flatfield_images(sims, quantiles, foreground_map=None)

Source code in src\image\flatfield.py, lines 33-42
def calc_flatfield_images(sims, quantiles, foreground_map=None):
    if foreground_map is not None:
        back_sims = [sim for sim, is_foreground in zip(sims, foreground_map) if not is_foreground]
    else:
        back_sims = sims
    dtype = sims[0].dtype
    maxval = 2 ** (8 * dtype.itemsize) - 1
    flatfield_images = [image.astype(np.float32) / np.float32(maxval)
                        for image in da.quantile(da.asarray(back_sims), quantiles, axis=0)]
    return flatfield_images

calc_foreground_map(sims)

Source code in src\image\util.py, lines 564-577
def calc_foreground_map(sims):
    if len(sims) <= 2:
        return [True] * len(sims)
    sims = [sim.squeeze().astype(np.float32) for sim in sims]
    median_image = calc_images_median(sims).astype(np.float32)
    difs = [np.mean(np.abs(sim - median_image), (0, 1)) for sim in sims]
    # or use stddev instead of mean?
    threshold = np.mean(difs, 0)
    #threshold, _ = cv.threshold(np.array(difs).astype(np.uint16), 0, 1, cv.THRESH_OTSU)
    #threshold, foregrounds = filter_noise_images(channel_images)
    map = (difs > threshold)
    if np.all(map == False):
        return [True] * len(sims)
    return map

calc_images_median(images)

Source code in src\image\util.py, lines 443-446
def calc_images_median(images):
    out_image = np.zeros(shape=images[0].shape, dtype=images[0].dtype)
    median_image = np.median(images, 0, out_image)
    return median_image

calc_images_quantiles(images, quantiles)

Source code in src\image\util.py, lines 449-451
def calc_images_quantiles(images, quantiles):
    quantile_images = [image.astype(np.float32) for image in np.quantile(images, quantiles, 0)]
    return quantile_images

calc_output_properties(sims, transform_key, z_scale=None)

Source code in src\image\util.py, lines 627-637
def calc_output_properties(sims, transform_key, z_scale=None):
    output_spacing = si_utils.get_spacing_from_sim(sims[0])
    if z_scale is not None:
        output_spacing['z'] = z_scale
    output_properties = fusion.calc_fusion_stack_properties(
        sims,
        [si_utils.get_affine_from_sim(sim, transform_key) for sim in sims],
        output_spacing,
        mode='union',
    )
    return output_properties

calc_pyramid(xyzct, npyramid_add=0, pyramid_downsample=2, volumetric_resize=False)

Source code in src\image\util.py, lines 193-206
def calc_pyramid(xyzct: tuple, npyramid_add: int = 0, pyramid_downsample: float = 2,
                 volumetric_resize: bool = False) -> list:
    x, y, z, c, t = xyzct
    if volumetric_resize and z > 1:
        size = (x, y, z)
    else:
        size = (x, y)
    sizes_add = []
    scale = 1
    for _ in range(npyramid_add):
        scale /= pyramid_downsample
        scaled_size = np.maximum(np.round(np.multiply(size, scale)).astype(int), 1)
        sizes_add.append(scaled_size)
    return sizes_add
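
For example, three extra pyramid levels for a hypothetical 4096x4096 2D image:

calc_pyramid((4096, 4096, 1, 3, 1), npyramid_add=3)
# [array([2048, 2048]), array([1024, 1024]), array([512, 512])]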

check_round_significants(a, significant_digits)

Source code in src\util.py, lines 113-121
def check_round_significants(a: float, significant_digits: int) -> float:
    rounded = round_significants(a, significant_digits)
    if a != 0:
        dif = 1 - rounded / a
    else:
        dif = rounded - a
    if abs(dif) < 10 ** -significant_digits:
        return rounded
    return a

color_image(image)

Source code in src\image\util.py, lines 42-47
def color_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 1:
        return cv.cvtColor(np.array(image), cv.COLOR_GRAY2RGB)
    else:
        return image

combine_transforms(transforms)

Source code in src\image\util.py, lines 709-716
def combine_transforms(transforms):
    combined_transform = None
    for transform in transforms:
        if combined_transform is None:
            combined_transform = transform
        else:
            combined_transform = np.dot(transform, combined_transform)
    return combined_transform
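
A minimal sketch: transforms are composed so that the first entry in the list is applied to points first:

import numpy as np
translate = np.array([[1, 0, 10], [0, 1, 0], [0, 0, 1]])
scale = np.array([[2, 0, 0], [0, 2, 0], [0, 0, 1]])
combined = combine_transforms([translate, scale])   # equals scale @ translate
combined @ [1, 0, 1]                                # array([22, 0, 1])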

convert_image_sign_type(image, target_dtype)

Source code in src\image\util.py, lines 97-107
def convert_image_sign_type(image: np.ndarray, target_dtype: np.dtype) -> np.ndarray:
    source_dtype = image.dtype
    if source_dtype.kind == target_dtype.kind:
        new_image = image
    elif source_dtype.kind == 'i':
        new_image = ensure_unsigned_image(image)
    else:
        # conversion without overhead
        offset = 2 ** (8 * target_dtype.itemsize - 1)
        new_image = (image - offset).astype(target_dtype)
    return new_image

convert_rational_value(value)

Source code in src\util.py, lines 321-327
def convert_rational_value(value) -> float:
    if value is not None and isinstance(value, tuple):
        if value[0] == value[1]:
            value = value[0]
        else:
            value = value[0] / value[1]
    return value

convert_to_um(value, unit)

Source code in src\util.py, lines 310-318
def convert_to_um(value, unit):
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    return value * conversions.get(unit, 1)
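
Unknown units fall back to a factor of 1, i.e. the value is assumed to already be in micrometers:

convert_to_um(0.5, 'mm')     # 500.0
convert_to_um(250, 'nm')     # 0.25
convert_to_um(1.3, 'pixel')  # 1.3 (unknown unit, returned unscaled)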

create_compression_filter(compression)

Source code in src\image\util.py, lines 399-425
def create_compression_filter(compression: list) -> tuple:
    compressor, compression_filters = None, None
    compression = ensure_list(compression)
    if compression is not None and len(compression) > 0:
        compression_type = compression[0].lower()
        if len(compression) > 1:
            level = int(compression[1])
        else:
            level = None
        if 'lzw' in compression_type:
            from imagecodecs.numcodecs import Lzw
            compression_filters = [Lzw()]
        elif '2k' in compression_type or '2000' in compression_type:
            from imagecodecs.numcodecs import Jpeg2k
            compression_filters = [Jpeg2k(level=level)]
        elif 'jpegls' in compression_type:
            from imagecodecs.numcodecs import Jpegls
            compression_filters = [Jpegls(level=level)]
        elif 'jpegxr' in compression_type:
            from imagecodecs.numcodecs import Jpegxr
            compression_filters = [Jpegxr(level=level)]
        elif 'jpegxl' in compression_type:
            from imagecodecs.numcodecs import Jpegxl
            compression_filters = [Jpegxl(level=level)]
        else:
            compressor = compression
    return compressor, compression_filters

create_transform(center, angle, matrix_size=3)

Source code in src\util.py, lines 356-368
def create_transform(center, angle, matrix_size=3):
    if isinstance(center, dict):
        center = dict_to_xyz(center)
    if len(center) == 2:
        center = np.array(list(center) + [0])
    if angle is None:
        angle = 0
    r = Rotation.from_euler('z', angle, degrees=True)
    t = center - r.apply(center, inverse=True)
    transform = np.eye(matrix_size)
    transform[:3, :3] = np.transpose(r.as_matrix())
    transform[:3, -1] += t
    return transform

create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0))

Source code in src\util.py, lines 348-353
def create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0)):
    transform = cv.getRotationMatrix2D(center[:2], angle, scale)
    transform[:, 2] += translate
    if len(transform) == 2:
        transform = np.vstack([transform, [0, 0, 1]])   # create 3x3 matrix
    return transform
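
A minimal sketch (requires OpenCV): with angle 0 the result is a plain scale-plus-translation matrix, expanded to 3x3:

create_transform0(center=(0, 0), angle=0, scale=2, translate=(3, 4))
# array([[2., 0., 3.],
#        [0., 2., 4.],
#        [0., 0., 1.]])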

desc_to_dict(desc)

Source code in src\util.py, lines 53-76
def desc_to_dict(desc: str) -> dict:
    desc_dict = {}
    if desc.startswith('{'):
        try:
            metadata = ast.literal_eval(desc)
            return metadata
        except:
            pass
    for item in re.split(r'[\r\n\t|]', desc):
        item_sep = '='
        if ':' in item:
            item_sep = ':'
        if item_sep in item:
            items = item.split(item_sep)
            key = items[0].strip()
            value = items[1].strip()
            for dtype in (int, float, bool):
                try:
                    value = dtype(value)
                    break
                except:
                    pass
            desc_dict[key] = value
    return desc_dict
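
For example, a pipe-separated description string is parsed into typed values:

desc_to_dict('Exposure=12.5|Gain=2')
# {'Exposure': 12.5, 'Gain': 2}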

detect_area_points(image)

Source code in src\image\util.py, lines 517-537
def detect_area_points(image):
    method = cv.THRESH_OTSU
    threshold = -5
    contours = []
    while len(contours) <= 1 and threshold <= 255:
        _, binimage = cv.threshold(np.array(uint8_image(image)), threshold, 255, method)
        contours0 = cv.findContours(binimage, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        contours = contours0[0] if len(contours0) == 2 else contours0[1]
        method = cv.THRESH_BINARY
        threshold += 5
    area_contours = [(contour, cv.contourArea(contour)) for contour in contours]
    area_contours.sort(key=lambda contour_area: contour_area[1], reverse=True)
    min_area = max(np.mean([area for contour, area in area_contours]), 1)
    area_points = [(get_center(contour), area) for contour, area in area_contours if area > min_area]

    #image = cv.cvtColor(image, cv.COLOR_GRAY2BGR)
    #for point in area_points:
    #    radius = int(np.round(np.sqrt(point[1]/np.pi)))
    #    cv.circle(image, tuple(np.round(point[0]).astype(int)), radius, (255, 0, 0), -1)
    #show_image(image)
    return area_points

dict_to_xyz(dct, keys='xyz')

Source code in src\util.py, lines 452-453
def dict_to_xyz(dct, keys='xyz'):
    return [dct[key] for key in keys if key in dct]

dir_regex(pattern)

Source code in src\util.py, lines 141-146
def dir_regex(pattern):
    files = []
    for pattern_item in ensure_list(pattern):
        files.extend(glob.glob(pattern_item, recursive=True))
    files_sorted = sorted(files, key=lambda file: find_all_numbers(get_filetitle(file)))
    return files_sorted

draw_edge_filter(bounds)

Source code in src\util.py, lines 496-504
def draw_edge_filter(bounds):
    out_image = np.zeros(np.flip(bounds))
    y, x = np.where(out_image == 0)
    points = np.transpose([x, y])

    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) * 10, 0, 1)
    return position_weights.reshape(np.flip(bounds))

draw_keypoints(image, points, color=(255, 0, 0))

Source code in src\image\util.py, lines 279-284
def draw_keypoints(image, points, color=(255, 0, 0)):
    out_image = color_image(float2int_image(image))
    for point in points:
        point = np.round(point).astype(int)
        cv.drawMarker(out_image, tuple(point), color=color, markerType=cv.MARKER_CROSS, markerSize=5, thickness=1)
    return out_image

draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None, points_color='black', match_color='red', inlier_color='lime', show_plot=True, output_filename=None)

Source code in src\image\util.py, lines 348-396
def draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None,
                           points_color='black', match_color='red', inlier_color='lime',
                           show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape = np.max([image.shape for image in [image1, image2]], axis=0)
    shape_y, shape_x = shape[:2]
    if shape_x > 2 * shape_y:
        merge_axis = 0
        offset2 = [shape_y, 0]
    else:
        merge_axis = 1
        offset2 = [0, shape_x]
    image = np.concatenate([
        np.pad(image1, ((0, shape[0] - image1.shape[0]), (0, shape[1] - image1.shape[1]))),
        np.pad(image2, ((0, shape[0] - image2.shape[0]), (0, shape[1] - image2.shape[1])))
    ], axis=merge_axis)
    ax.imshow(image, cmap='gray')

    ax.scatter(
        points1[:, 1],
        points1[:, 0],
        facecolors='none',
        edgecolors=points_color,
    )
    ax.scatter(
        points2[:, 1] + offset2[1],
        points2[:, 0] + offset2[0],
        facecolors='none',
        edgecolors=points_color,
    )

    for i, match in enumerate(matches):
        color = match_color
        if i < len(inliers) and inliers[i]:
            color = inlier_color
        index1, index2 = match
        ax.plot(
            (points1[index1, 1], points2[index2, 1] + offset2[1]),
            (points1[index1, 0], points2[index2, 0] + offset2[0]),
            '-', linewidth=1, alpha=0.5, color=color,
        )

    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

    return fig, ax

draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None, color=(255, 0, 0), inlier_color=(0, 255, 0), radius=15, thickness=2)

Source code in src\image\util.py, lines 287-320
def draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None,
                              color=(255, 0, 0), inlier_color=(0, 255, 0), radius = 15, thickness = 2):
    # based on https://gist.github.com/woolpeeker/d7e1821e1b5c556b32aafe10b7a1b7e8
    image1 = uint8_image(image1)
    image2 = uint8_image(image2)
    # We're drawing them side by side.  Get dimensions accordingly.
    new_shape = (max(image1.shape[0], image2.shape[0]), image1.shape[1] + image2.shape[1], 3)
    out_image = np.zeros(new_shape, image1.dtype)
    # Place images onto the new image.
    out_image[0:image1.shape[0], 0:image1.shape[1]] = color_image(image1)
    out_image[0:image2.shape[0], image1.shape[1]:image1.shape[1] + image2.shape[1]] = color_image(image2)

    if matches is not None:
        # Draw lines between matches.  Make sure to offset kp coords in second image appropriately.
        for index, match in enumerate(matches):
            if inliers is not None and inliers[index]:
                line_color = inlier_color
            else:
                line_color = color
            # So the keypoint locs are stored as a tuple of floats.  cv2.line() wants locs as a tuple of ints.
            end1 = tuple(np.round(points1[match[0]]).astype(int))
            end2 = tuple(np.round(points2[match[1]]).astype(int) + np.array([image1.shape[1], 0]))
            cv.line(out_image, end1, end2, line_color, thickness)
            cv.circle(out_image, end1, radius, line_color, thickness)
            cv.circle(out_image, end2, radius, line_color, thickness)
    else:
        # Draw all points if no matches are provided.
        for point in points1:
            point = tuple(np.round(point).astype(int))
            cv.circle(out_image, point, radius, color, thickness)
        for point in points2:
            point = tuple(np.round(point).astype(int) + np.array([image1.shape[1], 0]))
            cv.circle(out_image, point, radius, color, thickness)
    return out_image

draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None, show_plot=True, output_filename=None)

Source code in src\image\util.py, lines 323-345
def draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None,
                              show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape_y, shape_x = image1.shape[:2]
    if shape_x > 2 * shape_y:
        alignment = 'vertical'
    else:
        alignment = 'horizontal'
    plot_matched_features(
        image1,
        image2,
        keypoints0=points1,
        keypoints1=points2,
        matches=matches,
        ax=ax,
        alignment=alignment,
        only_matches=True,
    )
    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

ensure_list(x)

Source code in src\util.py, lines 18-24
def ensure_list(x) -> list:
    if x is None:
        return []
    elif isinstance(x, list):
        return x
    else:
        return [x]

ensure_unsigned_image(image)

Source code in src\image\util.py, lines 85-94
def ensure_unsigned_image(image: np.ndarray) -> np.ndarray:
    source_dtype = image.dtype
    dtype = ensure_unsigned_type(source_dtype)
    if dtype != source_dtype:
        # conversion without overhead
        offset = 2 ** (8 * dtype.itemsize - 1)
        new_image = image.astype(dtype) + offset
    else:
        new_image = image
    return new_image

ensure_unsigned_type(dtype)

Source code in src\image\util.py, lines 78-82
def ensure_unsigned_type(dtype: np.dtype) -> np.dtype:
    new_dtype = dtype
    if dtype.kind == 'i' or dtype.byteorder == '>' or dtype.byteorder == '<':
        new_dtype = np.dtype(f'u{dtype.itemsize}')
    return new_dtype
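
Signed and byte-swapped dtypes map to their native unsigned counterpart; native unsigned dtypes pass through:

import numpy as np
ensure_unsigned_type(np.dtype(np.int16))   # dtype('uint16')
ensure_unsigned_type(np.dtype('>u2'))      # dtype('uint16'), native byte order
ensure_unsigned_type(np.dtype(np.uint8))   # dtype('uint8'), unchanged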

eval_context(data, key, default_value, context)

Source code in src\util.py, lines 266-277
def eval_context(data, key, default_value, context):
    value = data.get(key, default_value)
    if isinstance(value, str):
        try:
            value = value.format_map(context)
        except:
            pass
        try:
            value = eval(value, context)
        except:
            pass
    return value

export_csv(filename, data, header=None)

Source code in src\util.py, lines 602-608
def export_csv(filename, data, header=None):
    with open(filename, 'w', encoding='utf8', newline='') as file:
        csvwriter = csv.writer(file)
        if header is not None:
            csvwriter.writerow(header)
        for row in data:
            csvwriter.writerow(row)

export_json(filename, data)

Source code in src\util.py, lines 591-593
def export_json(filename, data):
    with open(filename, 'w', encoding='utf8') as file:
        json.dump(data, file, indent=4)

filter_dict(dict0)

Source code in src\util.py, lines 38-50
def filter_dict(dict0: dict) -> dict:
    new_dict = {}
    for key, value0 in dict0.items():
        if value0 is not None:
            values = []
            for value in ensure_list(value0):
                if isinstance(value, dict):
                    value = filter_dict(value)
                values.append(value)
            if len(values) == 1:
                values = values[0]
            new_dict[key] = values
    return new_dict
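
None values are dropped recursively, including inside nested dicts:

filter_dict({'a': 1, 'b': None, 'c': {'d': None, 'e': 2}})
# {'a': 1, 'c': {'e': 2}}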

filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5)

Source code in src\util.py, lines 487-493
def filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5):
    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) / filter_factor, 0, 1)
    order_weights = 1 - np.array(range(len(points))) / len(points) / 2
    weights = position_weights * order_weights
    return weights > threshold

filter_noise_images(images)

Source code in src\image\util.py, lines 508-514
def filter_noise_images(images):
    dtype = images[0].dtype
    maxval = 2 ** (8 * dtype.itemsize) - 1
    image_vars = [np.asarray(np.std(image)).item() for image in images]
    threshold, mask0 = cv.threshold(np.array(image_vars).astype(dtype), 0, maxval, cv.THRESH_OTSU)
    mask = [flag.item() for flag in mask0.astype(bool)]
    return int(threshold), mask

find_all_numbers(text)

Source code in src\util.py, lines 149-150
def find_all_numbers(text: str) -> list:
    return list(map(int, re.findall(r'\d+', text)))

flatfield_correction(sims, transform_key, quantiles, foreground_map=None, cache_location=None)

Source code in src\image\flatfield.py, lines 10-25
def flatfield_correction(sims, transform_key, quantiles, foreground_map=None, cache_location=None):
    quantile_images = []
    if cache_location is not None:
        for quantile in quantiles:
            filename = get_quantile_filename(cache_location, quantile)
            if os.path.exists(filename):
                quantile_images.append(load_tiff(filename))

    if len(quantile_images) < len(quantiles):
        quantile_images = calc_flatfield_images(sims, quantiles, foreground_map)
        if cache_location is not None:
            for quantile, quantile_image in zip(quantiles, quantile_images):
                filename = get_quantile_filename(cache_location, quantile)
                save_tiff(filename, quantile_image)

    return apply_flatfield_correction(sims, transform_key, quantiles, quantile_images)

float2int_image(image, target_dtype=np.dtype(np.uint8))

Source code in src\image\util.py, lines 59-65
def float2int_image(image, target_dtype=np.dtype(np.uint8)):
    source_dtype = image.dtype
    if source_dtype.kind not in ('i', 'u') and not target_dtype.kind == 'f':
        maxval = 2 ** (8 * target_dtype.itemsize) - 1
        return (image * maxval).astype(target_dtype)
    else:
        return image
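
For example, a float image in [0, 1] maps onto the full uint8 range; integer images pass through unchanged:

import numpy as np
float2int_image(np.array([0.0, 0.5, 1.0]))   # array([  0, 127, 255], dtype=uint8)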

get_center(data, offset=(0, 0))

Source code in src\util.py, lines 339-345
def get_center(data, offset=(0, 0)):
    moments = get_moments(data, offset=offset)
    if moments['m00'] != 0:
        center = get_moments_center(moments)
    else:
        center = np.mean(data, 0).flatten()  # close approximation
    return center.astype(np.float32)

get_center_from_transform(transform)

Source code in src\util.py, lines 409-424
def get_center_from_transform(transform):
    # from opencv:
    # t0 = (1-alpha) * cx - beta * cy
    # t1 = beta * cx + (1-alpha) * cy
    # where
    # alpha = cos(angle) * scale
    # beta = sin(angle) * scale
    # isolate cx and cy:
    t0, t1 = transform[:2, 2]
    scale = 1
    angle = np.arctan2(transform[0][1], transform[0][0])
    alpha = np.cos(angle) * scale
    beta = np.sin(angle) * scale
    cx = (t1 + t0 * (1 - alpha) / beta) / (beta + (1 - alpha) ** 2 / beta)
    cy = ((1 - alpha) * cx - t0) / beta
    return cx, cy

get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None)

Source code in src\image\util.py, lines 674-706
def get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None):
    if rotation is None:
        rotation = 0

    if isinstance(data, DataTree):
        sim = msi_utils.get_sim_from_msim(data)
    else:
        sim = data
    sdims = ''.join(si_utils.get_spatial_dims_from_sim(sim))
    sdims = sdims.replace('zyx', 'xyz').replace('yx', 'xy')   # order xy(z)
    origin = si_utils.get_origin_from_sim(sim)
    translation = [origin[sdim] for sdim in sdims]

    if len(translation) == 0:
        translation = [0, 0]
    if len(translation) == 2:
        if translation0 is not None and len(translation0) == 3:
            z = translation0[2]
        else:
            z = 0
        translation = list(translation) + [z]

    if transform is not None:
        translation1, rotation1, _ = get_properties_from_transform(transform, invert=True)
        translation = np.array(translation) + translation1
        rotation += rotation1

    if transform_key is not None:
        transform1 = sim.transforms[transform_key]
        translation1, rotation1, _ = get_properties_from_transform(transform1, invert=True)
        rotation += rotation1

    return translation, rotation

get_default(x, default)

Source code in src\util.py, lines 14-15
def get_default(x, default):
    return default if x is None else x

get_filetitle(filename)

Source code in src\util.py, lines 135-138
def get_filetitle(filename: str) -> str:
    filebase = os.path.basename(filename)
    title = os.path.splitext(filebase)[0].rstrip('.ome')
    return title

get_image_quantile(image, quantile, axis=None)

Source code in src\image\util.py, lines 454-456
def get_image_quantile(image: np.ndarray, quantile: float, axis=None) -> float:
    value = np.quantile(image, quantile, axis=axis).astype(image.dtype)
    return value

get_image_size_info(sizes_xyzct, pixel_nbytes, pixel_type, channels)

Source code in src\image\util.py, lines 152-168
def get_image_size_info(sizes_xyzct: list, pixel_nbytes: int, pixel_type: np.dtype, channels: list) -> str:
    image_size_info = 'XYZCT:'
    size = 0
    for i, size_xyzct in enumerate(sizes_xyzct):
        w, h, zs, cs, ts = size_xyzct
        size += np.int64(pixel_nbytes) * w * h * zs * cs * ts
        if i > 0:
            image_size_info += ','
        image_size_info += f' {w} {h} {zs} {cs} {ts}'
    image_size_info += f' Pixel type: {pixel_type} Uncompressed: {print_hbytes(size)}'
    if sizes_xyzct[0][3] == 3:
        channel_info = 'rgb'
    else:
        channel_info = ','.join([channel.get('Name', '') for channel in channels])
    if channel_info != '':
        image_size_info += f' Channels: {channel_info}'
    return image_size_info

get_image_window(image, low=0.01, high=0.99)

Source code in src\image\util.py, lines 459-464
def get_image_window(image, low=0.01, high=0.99):
    window = (
        get_image_quantile(image, low),
        get_image_quantile(image, high)
    )
    return window

get_max_downsamples(shape, npyramid_add, pyramid_downsample)

Source code in src\image\util.py, lines 498-505
def get_max_downsamples(shape, npyramid_add, pyramid_downsample):
    shape = list(shape)
    for i in range(npyramid_add):
        shape[-1] //= pyramid_downsample
        shape[-2] //= pyramid_downsample
        if shape[-1] < 1 or shape[-2] < 1:
            return i
    return npyramid_add
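
For example, a 512x512 image supports at most 9 halvings before a spatial dimension vanishes:

get_max_downsamples((1, 512, 512), npyramid_add=12, pyramid_downsample=2)   # 9
get_max_downsamples((1, 512, 512), npyramid_add=4, pyramid_downsample=2)    # 4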

get_mean_nn_distance(points1, points2)

Source code in src\util.py, lines 483-484
def get_mean_nn_distance(points1, points2):
    return np.mean([get_nn_distance(points1), get_nn_distance(points2)])

get_moments(data, offset=(0, 0))

Source code in src\util.py, lines 330-332
def get_moments(data, offset=(0, 0)):
    moments = cv.moments((np.array(data) + offset).astype(np.float32))    # doesn't work for float64!
    return moments

get_moments_center(moments, offset=(0, 0))

Source code in src\util.py, lines 335-336
def get_moments_center(moments, offset=(0, 0)):
    return np.array([moments['m10'], moments['m01']]) / moments['m00'] + np.array(offset)

get_nn_distance(points0)

Source code in src\util.py, lines 472-480
def get_nn_distance(points0):
    points = list(set(map(tuple, points0)))     # get unique points
    if len(points) >= 2:
        tree = KDTree(points, leaf_size=2)
        dist, ind = tree.query(points, k=2)
        nn_distance = np.median(dist[:, 1])
    else:
        nn_distance = 1
    return nn_distance

get_numpy_slicing(dimension_order, **slicing)

Source code in src\image\util.py, lines 136-149
def get_numpy_slicing(dimension_order, **slicing):
    slices = []
    for axis in dimension_order:
        index = slicing.get(axis)
        index0 = slicing.get(axis + '0')
        index1 = slicing.get(axis + '1')
        if index0 is not None and index1 is not None:
            slice1 = slice(int(index0), int(index1))
        elif index is not None:
            slice1 = int(index)
        else:
            slice1 = slice(None)
        slices.append(slice1)
    return tuple(slices)
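
A minimal sketch: paired axis0/axis1 keys become ranges, a bare axis key selects a single index, and unspecified axes stay whole:

import numpy as np
data = np.zeros((2, 5, 100, 100))                     # tzyx
slices = get_numpy_slicing('tzyx', t=0, y0=10, y1=20)
# (0, slice(None, None, None), slice(10, 20, None), slice(None, None, None))
data[slices].shape                                    # (5, 10, 100)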

get_orthogonal_pairs(origins, image_size_um)

Get pairs of orthogonal neighbors from a list of tiles. Tiles don't have to be placed on a regular grid.

Source code in src\util.py, lines 507-546
def get_orthogonal_pairs(origins, image_size_um):
    """
    Get pairs of orthogonal neighbors from a list of tiles.
    Tiles don't have to be placed on a regular grid.
    """
    pairs = []
    angles = []
    z_positions = [pos[0] for pos in origins if len(pos) == 3]
    ordered_z = sorted(set(z_positions))
    is_mixed_3dstack = len(ordered_z) < len(z_positions)
    for i, j in np.transpose(np.triu_indices(len(origins), 1)):
        origini = np.array(origins[i])
        originj = np.array(origins[j])
        if is_mixed_3dstack:
            # ignore z value for distance
            distance = math.dist(origini[-2:], originj[-2:])
            min_distance = max(image_size_um[-2:])
            z_i, z_j = origini[0], originj[0]
            is_same_z = (z_i == z_j)
            is_close_z = abs(ordered_z.index(z_i) - ordered_z.index(z_j)) <= 1
            if not is_same_z:
                # for tiles in different z stack, require greater overlap
                min_distance *= 0.8
            ok = (distance < min_distance and is_close_z)
        else:
            distance = math.dist(origini, originj)
            min_distance = max(image_size_um)
            ok = (distance < min_distance)
        if ok:
            pairs.append((i, j))
            vector = origini - originj
            angle = math.degrees(math.atan2(vector[1], vector[0]))
            if distance < min(image_size_um):
                angle += 90
            while angle < -90:
                angle += 180
            while angle > 90:
                angle -= 180
            angles.append(angle)
    return pairs, angles

get_properties_from_transform(transform, invert=False)

Source code in src\image\util.py, lines 660-671
def get_properties_from_transform(transform, invert=False):
    if len(transform.shape) == 3:
        transform = transform[0]
    if invert:
        transform = param_utils.invert_coordinate_order(transform)
    transform = np.array(transform)
    translation = param_utils.translation_from_affine(transform)
    if len(translation) == 2:
        translation = list(translation) + [0]
    rotation = get_rotation_from_transform(transform)
    scale = get_scale_from_transform(transform)
    return translation, rotation, scale

get_quantile_filename(cache_location, quantile)

Source code in src\image\flatfield.py, lines 28-30
def get_quantile_filename(cache_location, quantile):
    filename = os.path.join(cache_location, 'quantile_' + f'{quantile}'.replace('.', '_') + '.tiff')
    return filename

get_rotation_from_transform(transform)

Source code in src\util.py, lines 427-429
def get_rotation_from_transform(transform):
    rotation = np.rad2deg(np.arctan2(transform[0][1], transform[0][0]))
    return rotation

get_scale_from_transform(transform)

Source code in src\util.py, lines 397-399
def get_scale_from_transform(transform):
    scale = np.mean(np.linalg.norm(transform, axis=0)[:-1])
    return scale

get_sim_physical_size(sim, invert=False)

Source code in src\image\util.py, lines 620-624
def get_sim_physical_size(sim, invert=False):
    size = si_utils.get_shape_from_sim(sim, asarray=True) * si_utils.get_spacing_from_sim(sim, asarray=True)
    if invert:
        size = np.flip(size)
    return size

get_sim_position_final(sim)

Source code in src\image\util.py, lines 540-545
def get_sim_position_final(sim):
    transform_keys = si_utils.get_tranform_keys_from_sim(sim)
    transform = combine_transforms([np.array(si_utils.get_affine_from_sim(sim, transform_key))
                                    for transform_key in transform_keys])
    position = apply_transform([si_utils.get_origin_from_sim(sim, asarray=True)], transform)[0]
    return position

get_sim_shape_2d(sim, transform_key=None)

Source code in src\image\util.py, lines 640-657
def get_sim_shape_2d(sim, transform_key=None):
    if 't' in sim.coords.xindexes:
        # work-around for points error in get_overlap_bboxes()
        sim1 = si_utils.sim_sel_coords(sim, {'t': 0})
    else:
        sim1 = sim
    stack_props = si_utils.get_stack_properties_from_sim(sim1, transform_key=transform_key)
    vertices = mv_graph.get_vertices_from_stack_props(stack_props)
    if vertices.shape[1] == 3:
        # remove z coordinate
        vertices = vertices[:, 1:]
    if len(vertices) >= 8:
        # remove redundant x/y vertices
        vertices = vertices[:4]
    if len(vertices) >= 4:
        # last 2 vertices appear to be swapped
        vertices[2:] = np.array(list(reversed(vertices[2:])))
    return vertices

get_translation_from_transform(transform)

Source code in src\util.py, lines 402-406
def get_translation_from_transform(transform):
    ndim = len(transform) - 1
    #translation = transform[:ndim, ndim]
    translation = apply_transform([[0] * ndim], transform)[0]
    return translation

get_unique_file_labels(filenames)

Source code in src\util.py, lines 179-206
def get_unique_file_labels(filenames: list) -> list:
    file_labels = []
    file_parts = []
    label_indices = set()
    last_parts = None
    for filename in filenames:
        parts = split_numeric(filename)
        if len(parts) == 0:
            parts = split_numeric(filename)
            if len(parts) == 0:
                parts = filename
        file_parts.append(parts)
        if last_parts is not None:
            for parti, (part1, part2) in enumerate(zip(last_parts, parts)):
                if part1 != part2:
                    label_indices.add(parti)
        last_parts = parts
    label_indices = sorted(list(label_indices))

    for file_part in file_parts:
        file_label = '_'.join([file_part[i] for i in label_indices])
        file_labels.append(file_label)

    if len(set(file_labels)) < len(file_labels):
        # fallback for duplicate labels
        file_labels = [get_filetitle(filename) for filename in filenames]

    return file_labels
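
For example, only the filename parts that vary between files end up in the labels (hypothetical filenames):

get_unique_file_labels(['scan_t01_z1.tiff', 'scan_t01_z2.tiff', 'scan_t02_z1.tiff'])
# ['t01_z1', 't01_z2', 't02_z1']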

get_value_units_micrometer(value_units0)

Source code in src\util.py, lines 280-307
def get_value_units_micrometer(value_units0: list|dict) -> list|dict|None:
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    if value_units0 is None:
        return None

    if isinstance(value_units0, dict):
        values_um = {}
        for dim, value_unit in value_units0.items():
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um[dim] = value_um
    else:
        values_um = []
        for value_unit in value_units0:
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um.append(value_um)
    return values_um

grayscale_image(image)

Source code in src\image\util.py, lines 32-39
def grayscale_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 4:
        return cv.cvtColor(image, cv.COLOR_RGBA2GRAY)
    elif nchannels > 1:
        return cv.cvtColor(image, cv.COLOR_RGB2GRAY)
    else:
        return image

group_sims_by_z(sims)

Source code in src\image\util.py, lines 548-561
def group_sims_by_z(sims):
    grouped_sims = []
    z_positions = [si_utils.get_origin_from_sim(sim).get('z') for sim in sims]
    is_mixed_3dstack = len(set(z_positions)) < len(z_positions)
    if is_mixed_3dstack:
        sims_by_z = {}
        for simi, z_pos in enumerate(z_positions):
            if z_pos is not None and z_pos not in sims_by_z:
                sims_by_z[z_pos] = []
            sims_by_z[z_pos].append(simi)
        grouped_sims = list(sims_by_z.values())
    if len(grouped_sims) == 0:
        grouped_sims = [list(range(len(sims)))]
    return grouped_sims

image_flatfield_correction(image0, dark, bright_dark_range, mean_bright_dark, clip=True)

Source code in src\image\flatfield.py, lines 88-98
def image_flatfield_correction(image0, dark, bright_dark_range, mean_bright_dark, clip=True):
    # Input/output: float images
    # https://en.wikipedia.org/wiki/Flat-field_correction
    image = (image0 - dark) * mean_bright_dark / bright_dark_range
    if clip:
        image = image.clip(0, 1)    # np.clip(image) is not dask-compatible, use image.clip() instead
    else:
        image -= np.min(image)
        if np.max(image) > 1:
            image /= np.max(image)
    return image

image_reshape(image, target_size)

Source code in src\image\util.py, lines 209-221
def image_reshape(image: np.ndarray, target_size: tuple) -> np.ndarray:
    tw, th = target_size
    sh, sw = image.shape[0:2]
    if sw < tw or sh < th:
        dw = max(tw - sw, 0)
        dh = max(th - sh, 0)
        padding = [(dh // 2, dh - dh //  2), (dw // 2, dw - dw // 2)]
        if len(image.shape) == 3:
            padding += [(0, 0)]
        image = np.pad(image, padding, mode='constant', constant_values=(0, 0))
    if tw < sw or th < sh:
        image = image[0:th, 0:tw]
    return image
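
Smaller images are zero-padded around the center; larger images are cropped from the top-left:

import numpy as np
image_reshape(np.ones((5, 5)), (8, 8)).shape   # (8, 8)
image_reshape(np.ones((5, 5)), (3, 3)).shape   # (3, 3)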

image_resize(image, target_size0, dimension_order='yxc')

Source code in src\image\util.py, lines 232-269
def image_resize(image: np.ndarray, target_size0: tuple, dimension_order: str = 'yxc') -> np.ndarray:
    shape = image.shape
    x_index = dimension_order.index('x')
    y_index = dimension_order.index('y')
    c_is_at_end = ('c' in dimension_order and dimension_order.endswith('c'))
    size = shape[x_index], shape[y_index]
    if np.mean(np.divide(size, target_size0)) < 1:
        interpolation = cv.INTER_CUBIC
    else:
        interpolation = cv.INTER_AREA
    dtype0 = image.dtype
    image = ensure_unsigned_image(image)
    target_size = tuple(np.maximum(np.round(target_size0).astype(int), 1))
    if dimension_order in ['yxc', 'yx']:
        new_image = cv.resize(np.asarray(image), target_size, interpolation=interpolation)
    elif dimension_order == 'cyx':
        new_image = np.moveaxis(image, 0, -1)
        new_image = cv.resize(np.asarray(new_image), target_size, interpolation=interpolation)
        new_image = np.moveaxis(new_image, -1, 0)
    else:
        ts = image.shape[dimension_order.index('t')] if 't' in dimension_order else 1
        zs = image.shape[dimension_order.index('z')] if 'z' in dimension_order else 1
        target_shape = list(image.shape).copy()
        target_shape[x_index] = target_size[0]
        target_shape[y_index] = target_size[1]
        new_image = np.zeros(target_shape, dtype=image.dtype)
        for t in range(ts):
            for z in range(zs):
                slices = get_numpy_slicing(dimension_order, z=z, t=t)
                image1 = image[slices]
                if not c_is_at_end:
                    image1 = np.moveaxis(image1, 0, -1)
                new_image1 = np.atleast_3d(cv.resize(np.asarray(image1), target_size, interpolation=interpolation))
                if not c_is_at_end:
                    new_image1 = np.moveaxis(new_image1, -1, 0)
                new_image[slices] = new_image1
    new_image = convert_image_sign_type(new_image, dtype0)
    return new_image

import_csv(filename)

Source code in src\util.py, lines 596-599
def import_csv(filename):
    with open(filename, encoding='utf8') as file:
        data = csv.reader(file)
    return data

import_json(filename)

Source code in src\util.py, lines 585-588
def import_json(filename):
    with open(filename, encoding='utf8') as file:
        data = json.load(file)
    return data

import_metadata(content, fields=None, input_path=None)

Source code in src\util.py, lines 568-582
def import_metadata(content, fields=None, input_path=None):
    # return dict[id] = {values}
    if isinstance(content, str):
        ext = os.path.splitext(content)[1].lower()
        if input_path:
            if isinstance(input_path, list):
                input_path = input_path[0]
            content = os.path.normpath(os.path.join(os.path.dirname(input_path), content))
        if ext == '.csv':
            content = import_csv(content)
        elif ext in ['.json', '.ome.json']:
            content = import_json(content)
    if fields is not None:
        content = [[data[field] for field in fields] for data in content]
    return content

int2float_image(image)

Source code in src\image\util.py, lines 50-56
def int2float_image(image):
    source_dtype = image.dtype
    if not source_dtype.kind == 'f':
        maxval = 2 ** (8 * source_dtype.itemsize) - 1
        return image / np.float32(maxval)
    else:
        return image

norm_image_quantiles(image0, quantile=0.99)

Source code in src\image\util.py, lines 484-495
def norm_image_quantiles(image0, quantile=0.99):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    min_value = np.quantile(image, 1 - quantile)
    max_value = np.quantile(image, quantile)
    normimage = (image - np.mean(image)) / (max_value - min_value)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

norm_image_variance(image0)

Source code in src\image\util.py, lines 472-481
def norm_image_variance(image0):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    normimage = (image - np.mean(image)) / np.std(image)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

normalise(sims, transform_key, use_global=True)

Source code in src\image\util.py, lines 580-617
def normalise(sims, transform_key, use_global=True):
    new_sims = []
    dtype = sims[0].dtype
    # global mean and stddev
    if use_global:
        mins = []
        ranges = []
        for sim in sims:
            min = np.mean(sim, dtype=np.float32)
            range = np.std(sim, dtype=np.float32)
            #min, max = get_image_window(sim, low=0.01, high=0.99)
            #range = max - min
            mins.append(min)
            ranges.append(range)
        min = np.mean(mins)
        range = np.mean(ranges)
    else:
        min = 0
        range = 1
    # normalise all images
    for sim in sims:
        if not use_global:
            min = np.mean(sim, dtype=np.float32)
            range = np.std(sim, dtype=np.float32)
        image = (sim - min) / range
        image = float2int_image(image.clip(0, 1), dtype)    # np.clip(image) is not dask-compatible, use image.clip() instead
        new_sim = si_utils.get_sim_from_array(
            image,
            dims=sim.dims,
            scale=si_utils.get_spacing_from_sim(sim),
            translation=si_utils.get_origin_from_sim(sim),
            transform_key=transform_key,
            affine=si_utils.get_affine_from_sim(sim, transform_key),
            c_coords=sim.c.data,
            t_coords=sim.t.data
        )
        new_sims.append(new_sim)
    return new_sims

normalise_rotated_positions(positions0, rotations0, size, center)

Source code in src\util.py, lines 456-469
def normalise_rotated_positions(positions0, rotations0, size, center):
    # in [xy(z)]
    positions = []
    rotations = []
    _, angles = get_orthogonal_pairs(positions0, size)
    for position0, rotation in zip(positions0, rotations0):
        if rotation is None and len(angles) > 0:
            rotation = -np.mean(angles)
        angle = -rotation if rotation is not None else None
        transform = create_transform(center=center, angle=angle, matrix_size=4)
        position = apply_transform([position0], transform)[0]
        positions.append(position)
        rotations.append(rotation)
    return positions, rotations

normalise_rotation(rotation)

Normalise rotation to be in the range [-180, 180].

Source code in src\util.py, lines 432-440
def normalise_rotation(rotation):
    """
    Normalise rotation to be in the range [-180, 180].
    """
    while rotation < -180:
        rotation += 360
    while rotation > 180:
        rotation -= 360
    return rotation
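
For example:

normalise_rotation(270)    # -90
normalise_rotation(-450)   # -90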

normalise_values(image, min_value, max_value)

Source code in src\image\util.py, lines 467-469
def normalise_values(image: np.ndarray, min_value: float, max_value: float) -> np.ndarray:
    image = (image.astype(np.float32) - min_value) / (max_value - min_value)
    return image.clip(0, 1)

pilmode_to_pixelinfo(mode)

Source code in src\image\util.py, lines 171-190
def pilmode_to_pixelinfo(mode: str) -> tuple:
    pixelinfo = (np.uint8, 8, 1)
    mode_types = {
        'I': (np.uint32, 32, 1),
        'F': (np.float32, 32, 1),
        'RGB': (np.uint8, 24, 3),
        'RGBA': (np.uint8, 32, 4),
        'CMYK': (np.uint8, 32, 4),
        'YCbCr': (np.uint8, 24, 3),
        'LAB': (np.uint8, 24, 3),
        'HSV': (np.uint8, 24, 3),
    }
    if '16' in mode:
        pixelinfo = (np.uint16, 16, 1)
    elif '32' in mode:
        pixelinfo = (np.uint32, 32, 1)
    elif mode in mode_types:
        pixelinfo = mode_types[mode]
    pixelinfo = (np.dtype(pixelinfo[0]), pixelinfo[1])
    return pixelinfo

points_to_3d(points)

Source code in src\util.py, lines 443-444
def points_to_3d(points):
    return [list(point) + [0] for point in points]

precise_resize(image, factors)

Source code in src\image\util.py, lines 272-276
def precise_resize(image: np.ndarray, factors) -> np.ndarray:
    if image.ndim > len(factors):
        factors = list(factors) + [1]
    new_image = downscale_local_mean(np.asarray(image), tuple(factors)).astype(image.dtype)
    return new_image

print_dict(dct, indent=0)

Source code in src\util.py, lines 79-95
def print_dict(dct: dict, indent: int = 0) -> str:
    s = ''
    if isinstance(dct, dict):
        for key, value in dct.items():
            s += '\n'
            if not isinstance(value, list):
                s += '\t' * indent + str(key) + ': '
            if isinstance(value, dict):
                s += print_dict(value, indent=indent + 1)
            elif isinstance(value, list):
                for v in value:
                    s += print_dict(v)
            else:
                s += str(value)
    else:
        s += str(dct)
    return s

print_hbytes(nbytes)

Source code in src\util.py, lines 98-110
def print_hbytes(nbytes: int) -> str:
    exps = ['', 'K', 'M', 'G', 'T', 'P', 'E']
    div = 1024
    exp = 0

    while nbytes > div:
        nbytes /= div
        exp += 1
    if exp < len(exps):
        e = exps[exp]
    else:
        e = f'e{exp * 3}'
    return f'{nbytes:.1f}{e}B'
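
For example:

print_hbytes(1023)           # '1023.0B'
print_hbytes(5 * 1024 ** 3)  # '5.0GB'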

redimension_data(data, old_order, new_order, **indices)

Source code in src\image\util.py, lines 110-133
def redimension_data(data, old_order, new_order, **indices):
    # able to provide optional dimension values e.g. t=0, z=0
    if new_order == old_order:
        return data

    new_data = data
    order = old_order
    # remove
    for o in old_order:
        if o not in new_order:
            index = order.index(o)
            dim_value = indices.get(o, 0)
            new_data = np.take(new_data, indices=dim_value, axis=index)
            order = order[:index] + order[index + 1:]
    # add
    for o in new_order:
        if o not in order:
            new_data = np.expand_dims(new_data, 0)
            order = o + order
    # move
    old_indices = [order.index(o) for o in new_order]
    new_indices = list(range(len(new_order)))
    new_data = np.moveaxis(new_data, old_indices, new_indices)
    return new_data
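
A minimal sketch: axes missing from the new order are sliced away (at the given index, default 0), missing target axes are added with size 1, and the remaining axes are reordered:

import numpy as np
redimension_data(np.zeros((10, 20, 3)), 'yxc', 'cyx').shape      # (3, 10, 20)
redimension_data(np.zeros((5, 10, 20)), 'tyx', 'yx', t=2).shape  # (10, 20)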

reorder(items, old_order, new_order, default_value=0)

Source code in src\util.py, lines 27-35
def reorder(items: list, old_order: str, new_order: str, default_value: int = 0) -> list:
    new_items = []
    for label in new_order:
        if label in old_order:
            item = items[old_order.index(label)]
        else:
            item = default_value
        new_items.append(item)
    return new_items
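
For example, reordering a shape from yxc to cyxz, with 0 filling the missing z entry:

reorder([10, 20, 3], 'yxc', 'cyxz')   # [3, 10, 20, 0]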

resize_image(image, new_size)

Source code in src\image\util.py, lines 224-229
def resize_image(image, new_size):
    if not isinstance(new_size, (tuple, list, np.ndarray)):
        # use single value for width; apply aspect ratio
        size = np.flip(image.shape[:2])
        new_size = new_size, new_size * size[1] // size[0]
    return cv.resize(image, new_size)

retuple(chunks, shape)

Expand chunks to match shape.

E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028) return (3, 4, 5, 64, 64)

If chunks is an integer, it is applied to all dimensions, to match the behaviour of zarr-python.

Source code in src\util.py, lines 549-565
def retuple(chunks, shape):
    # from ome-zarr-py
    """
    Expand chunks to match shape.

    E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028)
    return (3, 4, 5, 64, 64)

    If chunks is an integer, it is applied to all dimensions, to match
    the behaviour of zarr-python.
    """

    if isinstance(chunks, int):
        return tuple([chunks] * len(shape))

    dims_to_add = len(shape) - len(chunks)
    return *shape[:dims_to_add], *chunks
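
Following the docstring:

retuple((64, 64), (3, 4, 5, 1028, 1028))   # (3, 4, 5, 64, 64)
retuple(32, (2, 256, 256))                 # (32, 32, 32)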

round_significants(a, significant_digits)

Source code in src\util.py, lines 124-128
def round_significants(a: float, significant_digits: int) -> float:
    if a != 0:
        round_decimals = significant_digits - int(np.floor(np.log10(abs(a)))) - 1
        return round(a, round_decimals)
    return a

show_image(image, title='', cmap=None)

Source code in src\image\util.py, lines 22-29
def show_image(image, title='', cmap=None):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if cmap is None:
        cmap = 'gray' if nchannels == 1 else None
    plt.imshow(image, cmap=cmap)
    if title != '':
        plt.title(title)
    plt.show()

split_num_text(text)

Source code in src\util.py, lines 209-235
def split_num_text(text: str) -> list:
    num_texts = []
    block = ''
    is_num0 = None
    if text is None:
        return None

    for c in text:
        is_num = (c.isnumeric() or c == '.')
        if is_num0 is not None and is_num != is_num0:
            num_texts.append(block)
            block = ''
        block += c
        is_num0 = is_num
    if block != '':
        num_texts.append(block)

    num_texts2 = []
    for block in num_texts:
        block = block.strip()
        try:
            block = float(block)
        except:
            pass
        if block not in [' ', ',', '|']:
            num_texts2.append(block)
    return num_texts2

split_numeric(text)

Source code in src\util.py, lines 153-160
def split_numeric(text: str) -> list:
    num_parts = []
    parts = re.split(r'[_/\\.]', text)
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            num_parts.append(part)
    return num_parts

split_numeric_dict(text)

Source code in src\util.py, lines 163-176
def split_numeric_dict(text: str) -> dict:
    num_parts = {}
    parts = re.split(r'[_/\\.]', text)
    parti = 0
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            index = num_span.start()
            label = part[:index]
            if label == '':
                label = parti
            num_parts[label] = num_span.group()
            parti += 1
    return num_parts

split_path(path)

Source code in src\util.py, lines 131-132
def split_path(path: str) -> list:
    return os.path.normpath(path).split(os.path.sep)

split_value_unit_list(text)

Source code in src\util.py, lines 238-263
def split_value_unit_list(text: str) -> list:
    value_units = []
    if text is None:
        return None

    items = split_num_text(text)
    if isinstance(items[-1], str):
        def_unit = items[-1]
    else:
        def_unit = ''

    i = 0
    while i < len(items):
        value = items[i]
        if i + 1 < len(items):
            unit = items[i + 1]
        else:
            unit = ''
        if not isinstance(value, str):
            if isinstance(unit, str):
                i += 1
            else:
                unit = def_unit
            value_units.append((value, unit))
        i += 1
    return value_units

uint8_image(image)

Source code in src\image\util.py, lines 68-75
def uint8_image(image):
    source_dtype = image.dtype
    if source_dtype.kind == 'f':
        image = image * 255
    elif source_dtype.itemsize != 1:
        factor = 2 ** (8 * (source_dtype.itemsize - 1))
        image = image // factor
    return image.astype(np.uint8)
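
Float images are assumed to be in [0, 1]; wider integer types are reduced by integer division:

import numpy as np
uint8_image(np.array([0.0, 0.5, 1.0]))               # [  0 127 255]
uint8_image(np.array([0, 32768, 65535], np.uint16))  # [  0 128 255]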

validate_transform(transform, max_rotation=None)

Source code in src\util.py, lines 382-394
def validate_transform(transform, max_rotation=None):
    if transform is None:
        return False
    transform = np.array(transform)
    if np.any(np.isnan(transform)):
        return False
    if np.any(np.isinf(transform)):
        return False
    if np.linalg.det(transform) == 0:
        return False
    if max_rotation is not None and abs(normalise_rotation(get_rotation_from_transform(transform))) > max_rotation:
        return False
    return True

xyz_to_dict(xyz, axes='xyz')

Source code in src\util.py, lines 447-449
def xyz_to_dict(xyz, axes='xyz'):
    dct = {dim: value for dim, value in zip(axes, xyz)}
    return dct

ome_helper

apply_transform(points, transform)

Source code in src\util.py, lines 371-379
def apply_transform(points, transform):
    new_points = []
    for point in points:
        point_len = len(point)
        while len(point) < len(transform):
            point = list(point) + [1]
        new_point = np.dot(point, np.transpose(np.array(transform)))
        new_points.append(new_point[:point_len])
    return new_points

blur_image(image, sigma)

Source code in src\image\util.py, lines 432-440
def blur_image(image, sigma):
    nchannels = image.shape[2] if image.ndim == 3 else 1
    if nchannels not in [1, 3]:
        new_image = np.zeros_like(image)
        for channeli in range(nchannels):
            new_image[..., channeli] = blur_image_single(image[..., channeli], sigma)
    else:
        new_image = blur_image_single(image, sigma)
    return new_image

blur_image_single(image, sigma)

Source code in src\image\util.py, lines 428-429
def blur_image_single(image, sigma):
    return gaussian_filter(image, sigma)

calc_foreground_map(sims)

Source code in src\image\util.py, lines 564-577
def calc_foreground_map(sims):
    if len(sims) <= 2:
        return [True] * len(sims)
    sims = [sim.squeeze().astype(np.float32) for sim in sims]
    median_image = calc_images_median(sims).astype(np.float32)
    difs = [np.mean(np.abs(sim - median_image), (0, 1)) for sim in sims]
    # or use stddev instead of mean?
    threshold = np.mean(difs, 0)
    #threshold, _ = cv.threshold(np.array(difs).astype(np.uint16), 0, 1, cv.THRESH_OTSU)
    #threshold, foregrounds = filter_noise_images(channel_images)
    map = (difs > threshold)
    if np.all(map == False):
        return [True] * len(sims)
    return map

calc_images_median(images)

Source code in src\image\util.py
def calc_images_median(images):
    out_image = np.zeros(shape=images[0].shape, dtype=images[0].dtype)
    median_image = np.median(images, 0, out_image)
    return median_image

calc_images_quantiles(images, quantiles)

Source code in src\image\util.py
def calc_images_quantiles(images, quantiles):
    quantile_images = [image.astype(np.float32) for image in np.quantile(images, quantiles, 0)]
    return quantile_images

calc_output_properties(sims, transform_key, z_scale=None)

Source code in src\image\util.py
def calc_output_properties(sims, transform_key, z_scale=None):
    output_spacing = si_utils.get_spacing_from_sim(sims[0])
    if z_scale is not None:
        output_spacing['z'] = z_scale
    output_properties = fusion.calc_fusion_stack_properties(
        sims,
        [si_utils.get_affine_from_sim(sim, transform_key) for sim in sims],
        output_spacing,
        mode='union',
    )
    return output_properties

calc_pyramid(xyzct, npyramid_add=0, pyramid_downsample=2, volumetric_resize=False)

Source code in src\image\util.py
def calc_pyramid(xyzct: tuple, npyramid_add: int = 0, pyramid_downsample: float = 2,
                 volumetric_resize: bool = False) -> list:
    x, y, z, c, t = xyzct
    if volumetric_resize and z > 1:
        size = (x, y, z)
    else:
        size = (x, y)
    sizes_add = []
    scale = 1
    for _ in range(npyramid_add):
        scale /= pyramid_downsample
        scaled_size = np.maximum(np.round(np.multiply(size, scale)).astype(int), 1)
        sizes_add.append(scaled_size)
    return sizes_add
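
For instance, three extra levels for a 4096x4096 plane (illustrative):

calc_pyramid((4096, 4096, 1, 1, 1), npyramid_add=3)
# [array([2048, 2048]), array([1024, 1024]), array([512, 512])]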

check_round_significants(a, significant_digits)

Source code in src\util.py
def check_round_significants(a: float, significant_digits: int) -> float:
    rounded = round_significants(a, significant_digits)
    if a != 0:
        dif = 1 - rounded / a
    else:
        dif = rounded - a
    if abs(dif) < 10 ** -significant_digits:
        return rounded
    return a

color_image(image)

Source code in src\image\util.py
def color_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 1:
        return cv.cvtColor(np.array(image), cv.COLOR_GRAY2RGB)
    else:
        return image

combine_transforms(transforms)

Source code in src\image\util.py
def combine_transforms(transforms):
    combined_transform = None
    for transform in transforms:
        if combined_transform is None:
            combined_transform = transform
        else:
            combined_transform = np.dot(transform, combined_transform)
    return combined_transform
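
Note the order: the first transform in the list is applied first. A small sketch, assuming numpy as np:

t1 = np.array([[1, 0, 5], [0, 1, 0], [0, 0, 1]])   # translate x by 5
t2 = np.diag([2, 2, 1])                            # scale by 2
combine_transforms([t1, t2])   # equals t2 @ t1; the translation is scaled to 10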

convert_image_sign_type(image, target_dtype)

Source code in src\image\util.py
def convert_image_sign_type(image: np.ndarray, target_dtype: np.dtype) -> np.ndarray:
    source_dtype = image.dtype
    if source_dtype.kind == target_dtype.kind:
        new_image = image
    elif source_dtype.kind == 'i':
        new_image = ensure_unsigned_image(image)
    else:
        # conversion without overhead
        offset = 2 ** (8 * target_dtype.itemsize - 1)
        new_image = (image - offset).astype(target_dtype)
    return new_image

convert_rational_value(value)

Source code in src\util.py
def convert_rational_value(value) -> float:
    if value is not None and isinstance(value, tuple):
        if value[0] == value[1]:
            value = value[0]
        else:
            value = value[0] / value[1]
    return value
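
Illustrative values:

convert_rational_value((1, 2))   # 0.5
convert_rational_value((3, 3))   # 3
convert_rational_value(0.25)     # 0.25 - non-tuples pass through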

convert_to_um(value, unit)

Source code in src\util.py
def convert_to_um(value, unit):
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    return value * conversions.get(unit, 1)
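
Illustrative conversions; unknown units fall through with factor 1:

convert_to_um(1.5, 'mm')   # 1500.0
convert_to_um(250, 'nm')   # 0.25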

create_compression_filter(compression)

Source code in src\image\util.py
def create_compression_filter(compression: list) -> tuple:
    compressor, compression_filters = None, None
    compression = ensure_list(compression)
    if compression is not None and len(compression) > 0:
        compression_type = compression[0].lower()
        if len(compression) > 1:
            level = int(compression[1])
        else:
            level = None
        if 'lzw' in compression_type:
            from imagecodecs.numcodecs import Lzw
            compression_filters = [Lzw()]
        elif '2k' in compression_type or '2000' in compression_type:
            from imagecodecs.numcodecs import Jpeg2k
            compression_filters = [Jpeg2k(level=level)]
        elif 'jpegls' in compression_type:
            from imagecodecs.numcodecs import Jpegls
            compression_filters = [Jpegls(level=level)]
        elif 'jpegxr' in compression_type:
            from imagecodecs.numcodecs import Jpegxr
            compression_filters = [Jpegxr(level=level)]
        elif 'jpegxl' in compression_type:
            from imagecodecs.numcodecs import Jpegxl
            compression_filters = [Jpegxl(level=level)]
        else:
            compressor = compression
    return compressor, compression_filters

create_transform(center, angle, matrix_size=3)

Source code in src\util.py
def create_transform(center, angle, matrix_size=3):
    if isinstance(center, dict):
        center = dict_to_xyz(center)
    if len(center) == 2:
        center = np.array(list(center) + [0])
    if angle is None:
        angle = 0
    r = Rotation.from_euler('z', angle, degrees=True)
    t = center - r.apply(center, inverse=True)
    transform = np.eye(matrix_size)
    transform[:3, :3] = np.transpose(r.as_matrix())
    transform[:3, -1] += t
    return transform
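
A sketch of the rotate-about-center behaviour (illustrative; matrix_size=4 gives a 3D affine): the center itself is a fixed point of the resulting transform.

t = create_transform(center=(10, 10), angle=90, matrix_size=4)
apply_transform([(10, 10, 0)], t)   # maps back to (10, 10, 0)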

create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0))

Source code in src\util.py
def create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0)):
    transform = cv.getRotationMatrix2D(center[:2], angle, scale)
    transform[:, 2] += translate
    if len(transform) == 2:
        transform = np.vstack([transform, [0, 0, 1]])   # create 3x3 matrix
    return transform

desc_to_dict(desc)

Source code in src\util.py
def desc_to_dict(desc: str) -> dict:
    desc_dict = {}
    if desc.startswith('{'):
        try:
            metadata = ast.literal_eval(desc)
            return metadata
        except:
            pass
    for item in re.split(r'[\r\n\t|]', desc):
        item_sep = '='
        if ':' in item:
            item_sep = ':'
        if item_sep in item:
            items = item.split(item_sep)
            key = items[0].strip()
            value = items[1].strip()
            for dtype in (int, float):
                try:
                    value = dtype(value)
                    break
                except ValueError:
                    pass
            if isinstance(value, str) and value.lower() in ('true', 'false'):
                # bool(value) would turn any non-empty string into True; parse booleans explicitly instead
                value = (value.lower() == 'true')
            desc_dict[key] = value
    return desc_dict
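
With the parsing above, an illustrative description yields typed values:

desc_to_dict('gain=2|exposure: 0.5|note=dark')
# {'gain': 2, 'exposure': 0.5, 'note': 'dark'}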

detect_area_points(image)

Source code in src\image\util.py
def detect_area_points(image):
    method = cv.THRESH_OTSU
    threshold = -5
    contours = []
    while len(contours) <= 1 and threshold <= 255:
        _, binimage = cv.threshold(np.array(uint8_image(image)), threshold, 255, method)
        contours0 = cv.findContours(binimage, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        contours = contours0[0] if len(contours0) == 2 else contours0[1]
        method = cv.THRESH_BINARY
        threshold += 5
    area_contours = [(contour, cv.contourArea(contour)) for contour in contours]
    area_contours.sort(key=lambda contour_area: contour_area[1], reverse=True)
    min_area = max(np.mean([area for contour, area in area_contours]), 1)
    area_points = [(get_center(contour), area) for contour, area in area_contours if area > min_area]

    #image = cv.cvtColor(image, cv.COLOR_GRAY2BGR)
    #for point in area_points:
    #    radius = int(np.round(np.sqrt(point[1]/np.pi)))
    #    cv.circle(image, tuple(np.round(point[0]).astype(int)), radius, (255, 0, 0), -1)
    #show_image(image)
    return area_points

dict_to_xyz(dct, keys='xyz')

Source code in src\util.py
def dict_to_xyz(dct, keys='xyz'):
    return [dct[key] for key in keys if key in dct]

dir_regex(pattern)

Source code in src\util.py
def dir_regex(pattern):
    files = []
    for pattern_item in ensure_list(pattern):
        files.extend(glob.glob(pattern_item, recursive=True))
    files_sorted = sorted(files, key=lambda file: find_all_numbers(get_filetitle(file)))
    return files_sorted

draw_edge_filter(bounds)

Source code in src\util.py
def draw_edge_filter(bounds):
    out_image = np.zeros(np.flip(bounds))
    y, x = np.where(out_image == 0)
    points = np.transpose([x, y])

    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) * 10, 0, 1)
    return position_weights.reshape(np.flip(bounds))

draw_keypoints(image, points, color=(255, 0, 0))

Source code in src\image\util.py
def draw_keypoints(image, points, color=(255, 0, 0)):
    out_image = color_image(float2int_image(image))
    for point in points:
        point = np.round(point).astype(int)
        cv.drawMarker(out_image, tuple(point), color=color, markerType=cv.MARKER_CROSS, markerSize=5, thickness=1)
    return out_image

draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None, points_color='black', match_color='red', inlier_color='lime', show_plot=True, output_filename=None)

Source code in src\image\util.py
def draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None,
                           points_color='black', match_color='red', inlier_color='lime',
                           show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape = np.max([image.shape for image in [image1, image2]], axis=0)
    shape_y, shape_x = shape[:2]
    if shape_x > 2 * shape_y:
        merge_axis = 0
        offset2 = [shape_y, 0]
    else:
        merge_axis = 1
        offset2 = [0, shape_x]
    image = np.concatenate([
        np.pad(image1, ((0, shape[0] - image1.shape[0]), (0, shape[1] - image1.shape[1]))),
        np.pad(image2, ((0, shape[0] - image2.shape[0]), (0, shape[1] - image2.shape[1])))
    ], axis=merge_axis)
    ax.imshow(image, cmap='gray')

    ax.scatter(
        points1[:, 1],
        points1[:, 0],
        facecolors='none',
        edgecolors=points_color,
    )
    ax.scatter(
        points2[:, 1] + offset2[1],
        points2[:, 0] + offset2[0],
        facecolors='none',
        edgecolors=points_color,
    )

    if matches is None:
        matches = []
    if inliers is None:
        inliers = []
    for i, match in enumerate(matches):
        color = match_color
        if i < len(inliers) and inliers[i]:
            color = inlier_color
        index1, index2 = match
        ax.plot(
            (points1[index1, 1], points2[index2, 1] + offset2[1]),
            (points1[index1, 0], points2[index2, 0] + offset2[0]),
            '-', linewidth=1, alpha=0.5, color=color,
        )

    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

    return fig, ax

draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None, color=(255, 0, 0), inlier_color=(0, 255, 0), radius=15, thickness=2)

Source code in src\image\util.py
def draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None,
                              color=(255, 0, 0), inlier_color=(0, 255, 0), radius=15, thickness=2):
    # based on https://gist.github.com/woolpeeker/d7e1821e1b5c556b32aafe10b7a1b7e8
    image1 = uint8_image(image1)
    image2 = uint8_image(image2)
    # We're drawing them side by side.  Get dimensions accordingly.
    new_shape = (max(image1.shape[0], image2.shape[0]), image1.shape[1] + image2.shape[1], 3)
    out_image = np.zeros(new_shape, image1.dtype)
    # Place images onto the new image.
    out_image[0:image1.shape[0], 0:image1.shape[1]] = color_image(image1)
    out_image[0:image2.shape[0], image1.shape[1]:image1.shape[1] + image2.shape[1]] = color_image(image2)

    if matches is not None:
        # Draw lines between matches.  Make sure to offset kp coords in second image appropriately.
        for index, match in enumerate(matches):
            if inliers is not None and inliers[index]:
                line_color = inlier_color
            else:
                line_color = color
            # So the keypoint locs are stored as a tuple of floats.  cv2.line() wants locs as a tuple of ints.
            end1 = tuple(np.round(points1[match[0]]).astype(int))
            end2 = tuple(np.round(points2[match[1]]).astype(int) + np.array([image1.shape[1], 0]))
            cv.line(out_image, end1, end2, line_color, thickness)
            cv.circle(out_image, end1, radius, line_color, thickness)
            cv.circle(out_image, end2, radius, line_color, thickness)
    else:
        # Draw all points if no matches are provided.
        for point in points1:
            point = tuple(np.round(point).astype(int))
            cv.circle(out_image, point, radius, color, thickness)
        for point in points2:
            point = tuple(np.round(point).astype(int) + np.array([image1.shape[1], 0]))
            cv.circle(out_image, point, radius, color, thickness)
    return out_image

draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None, show_plot=True, output_filename=None)

Source code in src\image\util.py
def draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None,
                              show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape_y, shape_x = image1.shape[:2]
    if shape_x > 2 * shape_y:
        alignment = 'vertical'
    else:
        alignment = 'horizontal'
    plot_matched_features(
        image1,
        image2,
        keypoints0=points1,
        keypoints1=points2,
        matches=matches,
        ax=ax,
        alignment=alignment,
        only_matches=True,
    )
    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

ensure_list(x)

Source code in src\util.py
def ensure_list(x) -> list:
    if x is None:
        return []
    elif isinstance(x, list):
        return x
    else:
        return [x]

ensure_unsigned_image(image)

Source code in src\image\util.py
def ensure_unsigned_image(image: np.ndarray) -> np.ndarray:
    source_dtype = image.dtype
    dtype = ensure_unsigned_type(source_dtype)
    if dtype != source_dtype:
        # conversion without overhead
        offset = 2 ** (8 * dtype.itemsize - 1)
        new_image = image.astype(dtype) + offset
    else:
        new_image = image
    return new_image

ensure_unsigned_type(dtype)

Source code in src\image\util.py
def ensure_unsigned_type(dtype: np.dtype) -> np.dtype:
    new_dtype = dtype
    if dtype.kind == 'i' or dtype.byteorder == '>' or dtype.byteorder == '<':
        new_dtype = np.dtype(f'u{dtype.itemsize}')
    return new_dtype

eval_context(data, key, default_value, context)

Source code in src\util.py
def eval_context(data, key, default_value, context):
    value = data.get(key, default_value)
    if isinstance(value, str):
        try:
            value = value.format_map(context)
        except:
            pass
        try:
            value = eval(value, context)
        except:
            pass
    return value

exists_output_image(path, output_format)

Source code in src\image\ome_helper.py
def exists_output_image(path, output_format):
    exists = True
    if 'zar' in output_format:
        exists = exists and os.path.exists(path + zarr_extension)
    if 'tif' in output_format:
        exists = exists and os.path.exists(path + tiff_extension)
    return exists

export_csv(filename, data, header=None)

Source code in src\util.py
def export_csv(filename, data, header=None):
    with open(filename, 'w', encoding='utf8', newline='') as file:
        csvwriter = csv.writer(file)
        if header is not None:
            csvwriter.writerow(header)
        for row in data:
            csvwriter.writerow(row)

export_json(filename, data)

Source code in src\util.py
def export_json(filename, data):
    with open(filename, 'w', encoding='utf8') as file:
        json.dump(data, file, indent=4)

filter_dict(dict0)

Source code in src\util.py
def filter_dict(dict0: dict) -> dict:
    new_dict = {}
    for key, value0 in dict0.items():
        if value0 is not None:
            values = []
            for value in ensure_list(value0):
                if isinstance(value, dict):
                    value = filter_dict(value)
                values.append(value)
            if len(values) == 1:
                values = values[0]
            new_dict[key] = values
    return new_dict

filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5)

Source code in src\util.py
def filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5):
    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) / filter_factor, 0, 1)
    order_weights = 1 - np.array(range(len(points))) / len(points) / 2
    weights = position_weights * order_weights
    return weights > threshold

filter_noise_images(images)

Source code in src\image\util.py
def filter_noise_images(images):
    dtype = images[0].dtype
    maxval = 2 ** (8 * dtype.itemsize) - 1
    image_vars = [np.asarray(np.std(image)).item() for image in images]
    threshold, mask0 = cv.threshold(np.array(image_vars).astype(dtype), 0, maxval, cv.THRESH_OTSU)
    mask = [flag.item() for flag in mask0.astype(bool)]
    return int(threshold), mask

find_all_numbers(text)

Source code in src\util.py
def find_all_numbers(text: str) -> list:
    return list(map(int, re.findall(r'\d+', text)))

float2int_image(image, target_dtype=np.dtype(np.uint8))

Source code in src\image\util.py
def float2int_image(image, target_dtype=np.dtype(np.uint8)):
    source_dtype = image.dtype
    if source_dtype.kind not in ('i', 'u') and not target_dtype.kind == 'f':
        maxval = 2 ** (8 * target_dtype.itemsize) - 1
        return (image * maxval).astype(target_dtype)
    else:
        return image

get_center(data, offset=(0, 0))

Source code in src\util.py
def get_center(data, offset=(0, 0)):
    moments = get_moments(data, offset=offset)
    if moments['m00'] != 0:
        center = get_moments_center(moments)
    else:
        center = np.mean(data, 0).flatten()  # close approximation
    return center.astype(np.float32)

get_center_from_transform(transform)

Source code in src\util.py
def get_center_from_transform(transform):
    # from opencv:
    # t0 = (1-alpha) * cx - beta * cy
    # t1 = beta * cx + (1-alpha) * cy
    # where
    # alpha = cos(angle) * scale
    # beta = sin(angle) * scale
    # isolate cx and cy:
    t0, t1 = transform[:2, 2]
    scale = 1
    angle = np.arctan2(transform[0][1], transform[0][0])
    alpha = np.cos(angle) * scale
    beta = np.sin(angle) * scale
    # note: assumes a non-zero rotation angle; beta == 0 (angle == 0) would divide by zero
    cx = (t1 + t0 * (1 - alpha) / beta) / (beta + (1 - alpha) ** 2 / beta)
    cy = ((1 - alpha) * cx - t0) / beta
    return cx, cy

get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None)

Source code in src\image\util.py
def get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None):
    if rotation is None:
        rotation = 0

    if isinstance(data, DataTree):
        sim = msi_utils.get_sim_from_msim(data)
    else:
        sim = data
    sdims = ''.join(si_utils.get_spatial_dims_from_sim(sim))
    sdims = sdims.replace('zyx', 'xyz').replace('yx', 'xy')   # order xy(z)
    origin = si_utils.get_origin_from_sim(sim)
    translation = [origin[sdim] for sdim in sdims]

    if len(translation) == 0:
        translation = [0, 0]
    if len(translation) == 2:
        if translation0 is not None and len(translation0) == 3:
            z = translation0[2]
        else:
            z = 0
        translation = list(translation) + [z]

    if transform is not None:
        translation1, rotation1, _ = get_properties_from_transform(transform, invert=True)
        translation = np.array(translation) + translation1
        rotation += rotation1

    if transform_key is not None:
        transform1 = sim.transforms[transform_key]
        translation1, rotation1, _ = get_properties_from_transform(transform1, invert=True)
        rotation += rotation1

    return translation, rotation

get_default(x, default)

Source code in src\util.py
def get_default(x, default):
    return default if x is None else x

get_filetitle(filename)

Source code in src\util.py
def get_filetitle(filename: str) -> str:
    filebase = os.path.basename(filename)
    title = os.path.splitext(filebase)[0]
    if title.endswith('.ome'):
        # rstrip('.ome') would strip any trailing '.', 'o', 'm', 'e' characters; remove the suffix instead
        title = title[:-len('.ome')]
    return title

get_image_quantile(image, quantile, axis=None)

Source code in src\image\util.py
def get_image_quantile(image: np.ndarray, quantile: float, axis=None) -> float:
    value = np.quantile(image, quantile, axis=axis).astype(image.dtype)
    return value

get_image_size_info(sizes_xyzct, pixel_nbytes, pixel_type, channels)

Source code in src\image\util.py
def get_image_size_info(sizes_xyzct: list, pixel_nbytes: int, pixel_type: np.dtype, channels: list) -> str:
    image_size_info = 'XYZCT:'
    size = 0
    for i, size_xyzct in enumerate(sizes_xyzct):
        w, h, zs, cs, ts = size_xyzct
        size += np.int64(pixel_nbytes) * w * h * zs * cs * ts
        if i > 0:
            image_size_info += ','
        image_size_info += f' {w} {h} {zs} {cs} {ts}'
    image_size_info += f' Pixel type: {pixel_type} Uncompressed: {print_hbytes(size)}'
    if sizes_xyzct[0][3] == 3:
        channel_info = 'rgb'
    else:
        channel_info = ','.join([channel.get('Name', '') for channel in channels])
    if channel_info != '':
        image_size_info += f' Channels: {channel_info}'
    return image_size_info

get_image_window(image, low=0.01, high=0.99)

Source code in src\image\util.py
def get_image_window(image, low=0.01, high=0.99):
    window = (
        get_image_quantile(image, low),
        get_image_quantile(image, high)
    )
    return window

get_max_downsamples(shape, npyramid_add, pyramid_downsample)

Source code in src\image\util.py
def get_max_downsamples(shape, npyramid_add, pyramid_downsample):
    shape = list(shape)
    for i in range(npyramid_add):
        shape[-1] //= pyramid_downsample
        shape[-2] //= pyramid_downsample
        if shape[-1] < 1 or shape[-2] < 1:
            return i
    return npyramid_add

get_mean_nn_distance(points1, points2)

Source code in src\util.py
def get_mean_nn_distance(points1, points2):
    return np.mean([get_nn_distance(points1), get_nn_distance(points2)])

get_moments(data, offset=(0, 0))

Source code in src\util.py
def get_moments(data, offset=(0, 0)):
    moments = cv.moments((np.array(data) + offset).astype(np.float32))    # doesn't work for float64!
    return moments

get_moments_center(moments, offset=(0, 0))

Source code in src\util.py
def get_moments_center(moments, offset=(0, 0)):
    return np.array([moments['m10'], moments['m01']]) / moments['m00'] + np.array(offset)

get_nn_distance(points0)

Source code in src\util.py
def get_nn_distance(points0):
    points = list(set(map(tuple, points0)))     # get unique points
    if len(points) >= 2:
        tree = KDTree(points, leaf_size=2)
        dist, ind = tree.query(points, k=2)
        nn_distance = np.median(dist[:, 1])
    else:
        nn_distance = 1
    return nn_distance

get_numpy_slicing(dimension_order, **slicing)

Source code in src\image\util.py
def get_numpy_slicing(dimension_order, **slicing):
    slices = []
    for axis in dimension_order:
        index = slicing.get(axis)
        index0 = slicing.get(axis + '0')
        index1 = slicing.get(axis + '1')
        if index0 is not None and index1 is not None:
            slice1 = slice(int(index0), int(index1))
        elif index is not None:
            slice1 = int(index)
        else:
            slice1 = slice(None)
        slices.append(slice1)
    return tuple(slices)
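
An illustrative call mixing a single index (t) with a half-open range (z0/z1):

get_numpy_slicing('tzyx', t=0, z0=2, z1=5)
# (0, slice(2, 5), slice(None), slice(None))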

get_orthogonal_pairs(origins, image_size_um)

Get pairs of orthogonal neighbors from a list of tiles. Tiles don't have to be placed on a regular grid.

Source code in src\util.py
def get_orthogonal_pairs(origins, image_size_um):
    """
    Get pairs of orthogonal neighbors from a list of tiles.
    Tiles don't have to be placed on a regular grid.
    """
    pairs = []
    angles = []
    z_positions = [pos[0] for pos in origins if len(pos) == 3]
    ordered_z = sorted(set(z_positions))
    is_mixed_3dstack = len(ordered_z) < len(z_positions)
    for i, j in np.transpose(np.triu_indices(len(origins), 1)):
        origini = np.array(origins[i])
        originj = np.array(origins[j])
        if is_mixed_3dstack:
            # ignore z value for distance
            distance = math.dist(origini[-2:], originj[-2:])
            min_distance = max(image_size_um[-2:])
            z_i, z_j = origini[0], originj[0]
            is_same_z = (z_i == z_j)
            is_close_z = abs(ordered_z.index(z_i) - ordered_z.index(z_j)) <= 1
            if not is_same_z:
                # for tiles in different z stack, require greater overlap
                min_distance *= 0.8
            ok = (distance < min_distance and is_close_z)
        else:
            distance = math.dist(origini, originj)
            min_distance = max(image_size_um)
            ok = (distance < min_distance)
        if ok:
            pairs.append((i, j))
            vector = origini - originj
            angle = math.degrees(math.atan2(vector[1], vector[0]))
            if distance < min(image_size_um):
                angle += 90
            while angle < -90:
                angle += 180
            while angle > 90:
                angle -= 180
            angles.append(angle)
    return pairs, angles

get_properties_from_transform(transform, invert=False)

Source code in src\image\util.py
def get_properties_from_transform(transform, invert=False):
    if len(transform.shape) == 3:
        transform = transform[0]
    if invert:
        transform = param_utils.invert_coordinate_order(transform)
    transform = np.array(transform)
    translation = param_utils.translation_from_affine(transform)
    if len(translation) == 2:
        translation = list(translation) + [0]
    rotation = get_rotation_from_transform(transform)
    scale = get_scale_from_transform(transform)
    return translation, rotation, scale

get_rotation_from_transform(transform)

Source code in src\util.py
def get_rotation_from_transform(transform):
    rotation = np.rad2deg(np.arctan2(transform[0][1], transform[0][0]))
    return rotation

get_scale_from_transform(transform)

Source code in src\util.py
def get_scale_from_transform(transform):
    scale = np.mean(np.linalg.norm(transform, axis=0)[:-1])
    return scale

get_sim_physical_size(sim, invert=False)

Source code in src\image\util.py
def get_sim_physical_size(sim, invert=False):
    size = si_utils.get_shape_from_sim(sim, asarray=True) * si_utils.get_spacing_from_sim(sim, asarray=True)
    if invert:
        size = np.flip(size)
    return size

get_sim_position_final(sim)

Source code in src\image\util.py
def get_sim_position_final(sim):
    transform_keys = si_utils.get_tranform_keys_from_sim(sim)
    transform = combine_transforms([np.array(si_utils.get_affine_from_sim(sim, transform_key))
                                    for transform_key in transform_keys])
    position = apply_transform([si_utils.get_origin_from_sim(sim, asarray=True)], transform)[0]
    return position

get_sim_shape_2d(sim, transform_key=None)

Source code in src\image\util.py
def get_sim_shape_2d(sim, transform_key=None):
    if 't' in sim.coords.xindexes:
        # work-around for points error in get_overlap_bboxes()
        sim1 = si_utils.sim_sel_coords(sim, {'t': 0})
    else:
        sim1 = sim
    stack_props = si_utils.get_stack_properties_from_sim(sim1, transform_key=transform_key)
    vertices = mv_graph.get_vertices_from_stack_props(stack_props)
    if vertices.shape[1] == 3:
        # remove z coordinate
        vertices = vertices[:, 1:]
    if len(vertices) >= 8:
        # remove redundant x/y vertices
        vertices = vertices[:4]
    if len(vertices) >= 4:
        # last 2 vertices appear to be swapped
        vertices[2:] = np.array(list(reversed(vertices[2:])))
    return vertices

get_translation_from_transform(transform)

Source code in src\util.py
def get_translation_from_transform(transform):
    ndim = len(transform) - 1
    #translation = transform[:ndim, ndim]
    translation = apply_transform([[0] * ndim], transform)[0]
    return translation

get_unique_file_labels(filenames)

Source code in src\util.py
def get_unique_file_labels(filenames: list) -> list:
    file_labels = []
    file_parts = []
    label_indices = set()
    last_parts = None
    for filename in filenames:
        parts = split_numeric(filename)
        if len(parts) == 0:
            # no numeric parts found; fall back to the whole filename
            parts = filename
        file_parts.append(parts)
        if last_parts is not None:
            for parti, (part1, part2) in enumerate(zip(last_parts, parts)):
                if part1 != part2:
                    label_indices.add(parti)
        last_parts = parts
    label_indices = sorted(list(label_indices))

    for file_part in file_parts:
        file_label = '_'.join([file_part[i] for i in label_indices])
        file_labels.append(file_label)

    if len(set(file_labels)) < len(file_labels):
        # fallback for duplicate labels
        file_labels = [get_filetitle(filename) for filename in filenames]

    return file_labels

get_value_units_micrometer(value_units0)

Source code in src\util.py
def get_value_units_micrometer(value_units0: list|dict) -> list|dict|None:
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    if value_units0 is None:
        return None

    if isinstance(value_units0, dict):
        values_um = {}
        for dim, value_unit in value_units0.items():
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um[dim] = value_um
    else:
        values_um = []
        for value_unit in value_units0:
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um.append(value_um)
    return values_um
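
Illustrative conversions for both list and dict input:

get_value_units_micrometer([(0.5, 'mm'), 2])    # [500.0, 2]
get_value_units_micrometer({'x': (100, 'nm')})  # {'x': 0.1}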

grayscale_image(image)

Source code in src\image\util.py
def grayscale_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 4:
        return cv.cvtColor(image, cv.COLOR_RGBA2GRAY)
    elif nchannels > 1:
        return cv.cvtColor(image, cv.COLOR_RGB2GRAY)
    else:
        return image

group_sims_by_z(sims)

Source code in src\image\util.py
def group_sims_by_z(sims):
    grouped_sims = []
    z_positions = [si_utils.get_origin_from_sim(sim).get('z') for sim in sims]
    is_mixed_3dstack = len(set(z_positions)) < len(z_positions)
    if is_mixed_3dstack:
        sims_by_z = {}
        for simi, z_pos in enumerate(z_positions):
            # also create an entry when z_pos is None, to avoid a KeyError on append
            if z_pos not in sims_by_z:
                sims_by_z[z_pos] = []
            sims_by_z[z_pos].append(simi)
        grouped_sims = list(sims_by_z.values())
    if len(grouped_sims) == 0:
        grouped_sims = [list(range(len(sims)))]
    return grouped_sims

image_reshape(image, target_size)

Source code in src\image\util.py
def image_reshape(image: np.ndarray, target_size: tuple) -> np.ndarray:
    tw, th = target_size
    sh, sw = image.shape[0:2]
    if sw < tw or sh < th:
        dw = max(tw - sw, 0)
        dh = max(th - sh, 0)
        padding = [(dh // 2, dh - dh // 2), (dw // 2, dw - dw // 2)]
        if len(image.shape) == 3:
            padding += [(0, 0)]
        image = np.pad(image, padding, mode='constant', constant_values=(0, 0))
    if tw < sw or th < sh:
        image = image[0:th, 0:tw]
    return image

image_resize(image, target_size0, dimension_order='yxc')

Source code in src\image\util.py
def image_resize(image: np.ndarray, target_size0: tuple, dimension_order: str = 'yxc') -> np.ndarray:
    shape = image.shape
    x_index = dimension_order.index('x')
    y_index = dimension_order.index('y')
    c_is_at_end = ('c' in dimension_order and dimension_order.endswith('c'))
    size = shape[x_index], shape[y_index]
    if np.mean(np.divide(size, target_size0)) < 1:
        interpolation = cv.INTER_CUBIC
    else:
        interpolation = cv.INTER_AREA
    dtype0 = image.dtype
    image = ensure_unsigned_image(image)
    target_size = tuple(np.maximum(np.round(target_size0).astype(int), 1))
    if dimension_order in ['yxc', 'yx']:
        new_image = cv.resize(np.asarray(image), target_size, interpolation=interpolation)
    elif dimension_order == 'cyx':
        new_image = np.moveaxis(image, 0, -1)
        new_image = cv.resize(np.asarray(new_image), target_size, interpolation=interpolation)
        new_image = np.moveaxis(new_image, -1, 0)
    else:
        ts = image.shape[dimension_order.index('t')] if 't' in dimension_order else 1
        zs = image.shape[dimension_order.index('z')] if 'z' in dimension_order else 1
        target_shape = list(image.shape).copy()
        target_shape[x_index] = target_size[0]
        target_shape[y_index] = target_size[1]
        new_image = np.zeros(target_shape, dtype=image.dtype)
        for t in range(ts):
            for z in range(zs):
                slices = get_numpy_slicing(dimension_order, z=z, t=t)
                image1 = image[slices]
                if not c_is_at_end:
                    image1 = np.moveaxis(image1, 0, -1)
                new_image1 = np.atleast_3d(cv.resize(np.asarray(image1), target_size, interpolation=interpolation))
                if not c_is_at_end:
                    new_image1 = np.moveaxis(new_image1, -1, 0)
                new_image[slices] = new_image1
    new_image = convert_image_sign_type(new_image, dtype0)
    return new_image

import_csv(filename)

Source code in src\util.py
def import_csv(filename):
    with open(filename, encoding='utf8') as file:
        data = list(csv.reader(file))   # materialise the rows before the file closes
    return data

import_json(filename)

Source code in src\util.py
def import_json(filename):
    with open(filename, encoding='utf8') as file:
        data = json.load(file)
    return data

import_metadata(content, fields=None, input_path=None)

Source code in src\util.py
def import_metadata(content, fields=None, input_path=None):
    # return dict[id] = {values}
    if isinstance(content, str):
        ext = os.path.splitext(content)[1].lower()
        if input_path:
            if isinstance(input_path, list):
                input_path = input_path[0]
            content = os.path.normpath(os.path.join(os.path.dirname(input_path), content))
        if ext == '.csv':
            content = import_csv(content)
        elif ext in ['.json', '.ome.json']:
            content = import_json(content)
    if fields is not None:
        content = [[data[field] for field in fields] for data in content]
    return content

int2float_image(image)

Source code in src\image\util.py
def int2float_image(image):
    source_dtype = image.dtype
    if not source_dtype.kind == 'f':
        maxval = 2 ** (8 * source_dtype.itemsize) - 1
        return image / np.float32(maxval)
    else:
        return image

norm_image_quantiles(image0, quantile=0.99)

Source code in src\image\util.py
def norm_image_quantiles(image0, quantile=0.99):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    min_value = np.quantile(image, 1 - quantile)
    max_value = np.quantile(image, quantile)
    normimage = (image - np.mean(image)) / (max_value - min_value)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

norm_image_variance(image0)

Source code in src\image\util.py
def norm_image_variance(image0):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    normimage = (image - np.mean(image)) / np.std(image)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

normalise(sims, transform_key, use_global=True)

Source code in src\image\util.py
def normalise(sims, transform_key, use_global=True):
    new_sims = []
    dtype = sims[0].dtype
    # global mean and stddev
    if use_global:
        mins = []
        ranges = []
        for sim in sims:
            min = np.mean(sim, dtype=np.float32)
            range = np.std(sim, dtype=np.float32)
            #min, max = get_image_window(sim, low=0.01, high=0.99)
            #range = max - min
            mins.append(min)
            ranges.append(range)
        min = np.mean(mins)
        range = np.mean(ranges)
    else:
        min = 0
        range = 1
    # normalise all images
    for sim in sims:
        if not use_global:
            min = np.mean(sim, dtype=np.float32)
            range = np.std(sim, dtype=np.float32)
        image = (sim - min) / range
        image = float2int_image(image.clip(0, 1), dtype)    # np.clip(image) is not dask-compatible, use image.clip() instead
        new_sim = si_utils.get_sim_from_array(
            image,
            dims=sim.dims,
            scale=si_utils.get_spacing_from_sim(sim),
            translation=si_utils.get_origin_from_sim(sim),
            transform_key=transform_key,
            affine=si_utils.get_affine_from_sim(sim, transform_key),
            c_coords=sim.c.data,
            t_coords=sim.t.data
        )
        new_sims.append(new_sim)
    return new_sims

normalise_rotated_positions(positions0, rotations0, size, center)

Source code in src\util.py
def normalise_rotated_positions(positions0, rotations0, size, center):
    # in [xy(z)]
    positions = []
    rotations = []
    _, angles = get_orthogonal_pairs(positions0, size)
    for position0, rotation in zip(positions0, rotations0):
        if rotation is None and len(angles) > 0:
            rotation = -np.mean(angles)
        angle = -rotation if rotation is not None else None
        transform = create_transform(center=center, angle=angle, matrix_size=4)
        position = apply_transform([position0], transform)[0]
        positions.append(position)
        rotations.append(rotation)
    return positions, rotations

normalise_rotation(rotation)

Normalise rotation to be in the range [-180, 180].

Source code in src\util.py
def normalise_rotation(rotation):
    """
    Normalise rotation to be in the range [-180, 180].
    """
    while rotation < -180:
        rotation += 360
    while rotation > 180:
        rotation -= 360
    return rotation
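
For example:

normalise_rotation(270)    # -90
normalise_rotation(-450)   # -90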

normalise_values(image, min_value, max_value)

Source code in src\image\util.py
def normalise_values(image: np.ndarray, min_value: float, max_value: float) -> np.ndarray:
    image = (image.astype(np.float32) - min_value) / (max_value - min_value)
    return image.clip(0, 1)

pilmode_to_pixelinfo(mode)

Source code in src\image\util.py
def pilmode_to_pixelinfo(mode: str) -> tuple:
    pixelinfo = (np.uint8, 8, 1)
    mode_types = {
        'I': (np.uint32, 32, 1),
        'F': (np.float32, 32, 1),
        'RGB': (np.uint8, 24, 3),
        'RGBA': (np.uint8, 32, 4),
        'CMYK': (np.uint8, 32, 4),
        'YCbCr': (np.uint8, 24, 3),
        'LAB': (np.uint8, 24, 3),
        'HSV': (np.uint8, 24, 3),
    }
    if '16' in mode:
        pixelinfo = (np.uint16, 16, 1)
    elif '32' in mode:
        pixelinfo = (np.uint32, 32, 1)
    elif mode in mode_types:
        pixelinfo = mode_types[mode]
    pixelinfo = (np.dtype(pixelinfo[0]), pixelinfo[1])
    return pixelinfo

points_to_3d(points)

Source code in src\util.py
def points_to_3d(points):
    return [list(point) + [0] for point in points]

precise_resize(image, factors)

Source code in src\image\util.py
def precise_resize(image: np.ndarray, factors) -> np.ndarray:
    if image.ndim > len(factors):
        factors = list(factors) + [1]
    new_image = downscale_local_mean(np.asarray(image), tuple(factors)).astype(image.dtype)
    return new_image

print_dict(dct, indent=0)

Source code in src\util.py
def print_dict(dct: dict, indent: int = 0) -> str:
    s = ''
    if isinstance(dct, dict):
        for key, value in dct.items():
            s += '\n'
            if not isinstance(value, list):
                s += '\t' * indent + str(key) + ': '
            if isinstance(value, dict):
                s += print_dict(value, indent=indent + 1)
            elif isinstance(value, list):
                for v in value:
                    s += print_dict(v)
            else:
                s += str(value)
    else:
        s += str(dct)
    return s

print_hbytes(nbytes)

Source code in src\util.py
def print_hbytes(nbytes: int) -> str:
    exps = ['', 'K', 'M', 'G', 'T', 'P', 'E']
    div = 1024
    exp = 0

    while nbytes > div:
        nbytes /= div
        exp += 1
    if exp < len(exps):
        e = exps[exp]
    else:
        e = f'e{exp * 3}'
    return f'{nbytes:.1f}{e}B'
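
For example:

print_hbytes(500)           # '500.0B'
print_hbytes(3 * 1024**3)   # '3.0GB'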

redimension_data(data, old_order, new_order, **indices)

Source code in src\image\util.py
def redimension_data(data, old_order, new_order, **indices):
    # able to provide optional dimension values e.g. t=0, z=0
    if new_order == old_order:
        return data

    new_data = data
    order = old_order
    # remove
    for o in old_order:
        if o not in new_order:
            index = order.index(o)
            dim_value = indices.get(o, 0)
            new_data = np.take(new_data, indices=dim_value, axis=index)
            order = order[:index] + order[index + 1:]
    # add
    for o in new_order:
        if o not in order:
            new_data = np.expand_dims(new_data, 0)
            order = o + order
    # move
    old_indices = [order.index(o) for o in new_order]
    new_indices = list(range(len(new_order)))
    new_data = np.moveaxis(new_data, old_indices, new_indices)
    return new_data
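
Illustrative reshaping between dimension orders, assuming numpy as np:

redimension_data(np.zeros((4, 5)), 'yx', 'tczyx').shape        # (1, 1, 1, 4, 5)
redimension_data(np.zeros((3, 4, 5)), 'zyx', 'yx', z=1).shape  # (4, 5)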

reorder(items, old_order, new_order, default_value=0)

Source code in src\util.py
def reorder(items: list, old_order: str, new_order: str, default_value: int = 0) -> list:
    new_items = []
    for label in new_order:
        if label in old_order:
            item = items[old_order.index(label)]
        else:
            item = default_value
        new_items.append(item)
    return new_items
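
For example:

reorder([10, 20, 30], 'xyz', 'zyx')   # [30, 20, 10]
reorder([10, 20], 'xy', 'xyz')        # [10, 20, 0] - missing dims get the default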

resize_image(image, new_size)

Source code in src\image\util.py
def resize_image(image, new_size):
    if not isinstance(new_size, (tuple, list, np.ndarray)):
        # use single value for width; apply aspect ratio
        size = np.flip(image.shape[:2])
        new_size = new_size, new_size * size[1] // size[0]
    return cv.resize(image, new_size)

retuple(chunks, shape)

Expand chunks to match shape.

E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028) return (3, 4, 5, 64, 64)

If chunks is an integer, it is applied to all dimensions, to match the behaviour of zarr-python.

Source code in src\util.py
def retuple(chunks, shape):
    # from ome-zarr-py
    """
    Expand chunks to match shape.

    E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028)
    return (3, 4, 5, 64, 64)

    If chunks is an integer, it is applied to all dimensions, to match
    the behaviour of zarr-python.
    """

    if isinstance(chunks, int):
        return tuple([chunks] * len(shape))

    dims_to_add = len(shape) - len(chunks)
    return *shape[:dims_to_add], *chunks
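
For example, mirroring the docstring:

retuple(64, (3, 1024, 1024))           # (64, 64, 64)
retuple((64, 64), (3, 4, 1024, 1024))  # (3, 4, 64, 64)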

round_significants(a, significant_digits)

Source code in src\util.py
def round_significants(a: float, significant_digits: int) -> float:
    if a != 0:
        round_decimals = significant_digits - int(np.floor(np.log10(abs(a)))) - 1
        return round(a, round_decimals)
    return a

save_image(filename, output_format, sim, transform_key=None, channels=None, translation0=None, params={})

Source code in src\image\ome_helper.py
def save_image(filename, output_format, sim, transform_key=None, channels=None, translation0=None, params={}):
    dimension_order = ''.join(sim.dims)
    sdims = ''.join(si_utils.get_spatial_dims_from_sim(sim))
    sdims = sdims.replace('zyx', 'xyz').replace('yx', 'xy')   # order xy(z)
    pixel_size = []
    for dim in sdims:
        pixel_size1 = si_utils.get_spacing_from_sim(sim)[dim]
        if pixel_size1 == 0:
            pixel_size1 = 1
        pixel_size.append(pixel_size1)
    # metadata: only use coords of fused image
    position, rotation = get_data_mapping(sim, transform_key=transform_key, translation0=translation0)
    nplanes = sim.sizes.get('z', 1) * sim.sizes.get('c', 1) * sim.sizes.get('t', 1)
    positions = [position] * nplanes

    if channels is None:
        channels = sim.attrs.get('channels', [])

    tile_size = params.get('tile_size')
    if tile_size:
        if 'z' in sim.dims and len(tile_size) < 3:
            tile_size = list(tile_size) + [1]
        tile_size = tuple(reversed(tile_size))
        chunking = retuple(tile_size, sim.shape)
        sim = sim.chunk(chunks=chunking)

    compression = params.get('compression')
    pyramid_downsample = params.get('pyramid_downsample', 2)
    npyramid_add = get_max_downsamples(sim.shape, params.get('npyramid_add', 0), pyramid_downsample)
    scaler = Scaler(downscale=pyramid_downsample, max_layer=npyramid_add)

    if 'zar' in output_format:
        #save_ome_zarr(str(filename) + zarr_extension, sim.data, dimension_order, pixel_size,
        #              channels, position, rotation, compression=compression, scaler=scaler,
        #              zarr_version=3, ome_version='0.5')
        save_ome_ngff(str(filename) + zarr_extension, sim, channels, position, rotation,
                      pyramid_downsample=pyramid_downsample)
    if 'tif' in output_format:
        save_ome_tiff(str(filename) + tiff_extension, sim.data, dimension_order, pixel_size,
                      channels, positions, rotation, tile_size=tile_size, compression=compression, scaler=scaler)

show_image(image, title='', cmap=None)

Source code in src\image\util.py
def show_image(image, title='', cmap=None):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if cmap is None:
        cmap = 'gray' if nchannels == 1 else None
    plt.imshow(image, cmap=cmap)
    if title != '':
        plt.title(title)
    plt.show()

split_num_text(text)

Source code in src\util.py
def split_num_text(text: str) -> list:
    num_texts = []
    block = ''
    is_num0 = None
    if text is None:
        return None

    for c in text:
        is_num = (c.isnumeric() or c == '.')
        if is_num0 is not None and is_num != is_num0:
            num_texts.append(block)
            block = ''
        block += c
        is_num0 = is_num
    if block != '':
        num_texts.append(block)

    num_texts2 = []
    for block in num_texts:
        block = block.strip()
        try:
            block = float(block)
        except:
            pass
        if block not in ['', ' ', ',', '|']:   # also drop blocks that strip to empty
            num_texts2.append(block)
    return num_texts2

split_numeric(text)

Source code in src\util.py
def split_numeric(text: str) -> list:
    num_parts = []
    parts = re.split(r'[_/\\.]', text)
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            num_parts.append(part)
    return num_parts

split_numeric_dict(text)

Source code in src\util.py
def split_numeric_dict(text: str) -> dict:
    num_parts = {}
    parts = re.split(r'[_/\\.]', text)
    parti = 0
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            index = num_span.start()
            label = part[:index]
            if label == '':
                label = parti
            num_parts[label] = num_span.group()
            parti += 1
    return num_parts

split_path(path)

Source code in src\util.py
def split_path(path: str) -> list:
    return os.path.normpath(path).split(os.path.sep)

split_value_unit_list(text)

Source code in src\util.py
def split_value_unit_list(text: str) -> list:
    value_units = []
    if text is None:
        return None

    items = split_num_text(text)
    if isinstance(items[-1], str):
        def_unit = items[-1]
    else:
        def_unit = ''

    i = 0
    while i < len(items):
        value = items[i]
        if i + 1 < len(items):
            unit = items[i + 1]
        else:
            unit = ''
        if not isinstance(value, str):
            if isinstance(unit, str):
                i += 1
            else:
                unit = def_unit
            value_units.append((value, unit))
        i += 1
    return value_units

uint8_image(image)

Source code in src\image\util.py
def uint8_image(image):
    source_dtype = image.dtype
    if source_dtype.kind == 'f':
        image = image * 255
    elif source_dtype.itemsize != 1:
        factor = 2 ** (8 * (source_dtype.itemsize - 1))
        image = image // factor
    return image.astype(np.uint8)

validate_transform(transform, max_rotation=None)

Source code in src\util.py
def validate_transform(transform, max_rotation=None):
    if transform is None:
        return False
    transform = np.array(transform)
    if np.any(np.isnan(transform)):
        return False
    if np.any(np.isinf(transform)):
        return False
    if np.linalg.det(transform) == 0:
        return False
    if max_rotation is not None and abs(normalise_rotation(get_rotation_from_transform(transform))) > max_rotation:
        return False
    return True

xyz_to_dict(xyz, axes='xyz')

Source code in src\util.py
def xyz_to_dict(xyz, axes='xyz'):
    dct = {dim: value for dim, value in zip(axes, xyz)}
    return dct

ome_ngff_helper

save_ome_ngff(filename, sim, channels=None, translation=None, rotation=None, compression=None, pyramid_downsample=2)

Source code in src\image\ome_ngff_helper.py
def save_ome_ngff(filename, sim, channels=None, translation=None, rotation=None,
                  compression=None, pyramid_downsample=2):
    pyramid_downsample_dict = {}
    for dim in sim.dims:
        if dim in 'xy':
            pyramid_downsample_dict[dim] = pyramid_downsample
        else:
            pyramid_downsample_dict[dim] = 1
    ngff_utils.write_sim_to_ome_zarr(sim, filename,
                                     downscale_factors_per_spatial_dim=pyramid_downsample_dict, overwrite=True)

ome_tiff_helper

apply_transform(points, transform)

Source code in src\util.py
def apply_transform(points, transform):
    new_points = []
    for point in points:
        point_len = len(point)
        if len(point) < len(transform):
            # pad to homogeneous coordinates: 0 for any missing spatial dims, 1 as the final element
            point = list(point) + [0] * (len(transform) - 1 - len(point)) + [1]
        new_point = np.dot(point, np.transpose(np.array(transform)))
        new_points.append(new_point[:point_len])
    return new_points

check_round_significants(a, significant_digits)

Source code in src\util.py
def check_round_significants(a: float, significant_digits: int) -> float:
    rounded = round_significants(a, significant_digits)
    if a != 0:
        dif = 1 - rounded / a
    else:
        dif = rounded - a
    if abs(dif) < 10 ** -significant_digits:
        return rounded
    return a

convert_rational_value(value)

Source code in src\util.py
def convert_rational_value(value) -> float:
    if value is not None and isinstance(value, tuple):
        if value[0] == value[1]:
            value = value[0]
        else:
            value = value[0] / value[1]
    return value

convert_to_um(value, unit)

Source code in src\util.py
def convert_to_um(value, unit):
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    return value * conversions.get(unit, 1)
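
For example (values follow from the conversion table above; unknown units fall back to a factor of 1):

convert_to_um(5, 'mm')    # 5000.0
convert_to_um(250, 'nm')  # 0.25
convert_to_um(1.5, 'px')  # 1.5 (unknown unit, factor 1)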

create_tiff_metadata(pixel_size, dimension_order=None, channels=[], positions=[], rotation=None, is_ome=False)

Source code in src\image\ome_tiff_helper.py
def create_tiff_metadata(pixel_size, dimension_order=None, channels=[], positions=[], rotation=None, is_ome=False):
    ome_metadata = None
    resolution = None
    resolution_unit = None
    pixel_size_um = None

    if pixel_size is not None:
        pixel_size_um = get_value_units_micrometer(pixel_size)[:2]
        resolution_unit = 'CENTIMETER'
        resolution = [1e4 / size for size in pixel_size_um]

    if is_ome:
        ome_metadata = {'Creator': 'muvis-align'}
        if dimension_order is not None:
            #ome_metadata['DimensionOrder'] = dimension_order[::-1].upper()
            ome_metadata['axes'] = dimension_order.upper()
        ome_channels = []
        if pixel_size_um is not None:
            ome_metadata['PhysicalSizeX'] = float(pixel_size_um[0])
            ome_metadata['PhysicalSizeXUnit'] = 'µm'
            ome_metadata['PhysicalSizeY'] = float(pixel_size_um[1])
            ome_metadata['PhysicalSizeYUnit'] = 'µm'
            if len(pixel_size_um) > 2:
                ome_metadata['PhysicalSizeZ'] = float(pixel_size_um[2])
                ome_metadata['PhysicalSizeZUnit'] = 'µm'
        if positions is not None and len(positions) > 0:
            plane_metadata = {}
            plane_metadata['PositionX'] = [float(position[0]) for position in positions]
            plane_metadata['PositionXUnit'] = ['µm' for _ in positions]
            plane_metadata['PositionY'] = [float(position[1]) for position in positions]
            plane_metadata['PositionYUnit'] = ['µm' for _ in positions]
            if len(positions[0]) > 2:
                plane_metadata['PositionZ'] = [float(position[2]) for position in positions]
                plane_metadata['PositionZUnit'] = ['µm' for _ in positions]
            ome_metadata['Plane'] = plane_metadata
        if rotation is not None:
            ome_metadata['StructuredAnnotations'] = {'CommentAnnotation': {'Value': f'Angle: {rotation} degrees'}}
        for channeli, channel in enumerate(channels):
            ome_channel = {'Name': channel.get('label', str(channeli))}
            if 'color' in channel:
                ome_channel['Color'] = rgba_to_int(channel['color'])
            ome_channels.append(ome_channel)
        if ome_channels:
            ome_metadata['Channel'] = ome_channels
    return ome_metadata, resolution, resolution_unit
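
A usage sketch with hypothetical inputs; the resolution is returned in pixels per centimeter (1e4 divided by the pixel size in micrometer).

pixel_size = [(0.25, 'um'), (0.25, 'um')]
channels = [{'label': 'DAPI', 'color': (0, 0, 1, 1)}]
metadata, resolution, unit = create_tiff_metadata(pixel_size, 'yx', channels, is_ome=True)
# resolution == [40000.0, 40000.0], unit == 'CENTIMETER'
# metadata['PhysicalSizeX'] == 0.25, metadata['Channel'][0]['Name'] == 'DAPI'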

create_transform(center, angle, matrix_size=3)

Source code in src\util.py
def create_transform(center, angle, matrix_size=3):
    if isinstance(center, dict):
        center = dict_to_xyz(center)
    if len(center) == 2:
        center = np.array(list(center) + [0])
    if angle is None:
        angle = 0
    r = Rotation.from_euler('z', angle, degrees=True)
    t = center - r.apply(center, inverse=True)
    transform = np.eye(matrix_size)
    transform[:3, :3] = np.transpose(r.as_matrix())
    transform[:3, -1] += t
    return transform

create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0))

Source code in src\util.py
def create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0)):
    transform = cv.getRotationMatrix2D(center[:2], angle, scale)
    transform[:, 2] += translate
    if len(transform) == 2:
        transform = np.vstack([transform, [0, 0, 1]])   # create 3x3 matrix
    return transform

desc_to_dict(desc)

Source code in src\util.py
def desc_to_dict(desc: str) -> dict:
    desc_dict = {}
    if desc.startswith('{'):
        try:
            metadata = ast.literal_eval(desc)
            return metadata
        except:
            pass
    for item in re.split(r'[\r\n\t|]', desc):
        item_sep = '='
        if ':' in item:
            item_sep = ':'
        if item_sep in item:
            items = item.split(item_sep)
            key = items[0].strip()
            value = items[1].strip()
            for dtype in (int, float, bool):
                try:
                    value = dtype(value)
                    break
                except:
                    pass
            desc_dict[key] = value
    return desc_dict

dict_to_xyz(dct, keys='xyz')

Source code in src\util.py
def dict_to_xyz(dct, keys='xyz'):
    return [dct[key] for key in keys if key in dct]

dir_regex(pattern)

Source code in src\util.py
def dir_regex(pattern):
    files = []
    for pattern_item in ensure_list(pattern):
        files.extend(glob.glob(pattern_item, recursive=True))
    files_sorted = sorted(files, key=lambda file: find_all_numbers(get_filetitle(file)))
    return files_sorted

draw_edge_filter(bounds)

Source code in src\util.py
def draw_edge_filter(bounds):
    out_image = np.zeros(np.flip(bounds))
    y, x = np.where(out_image == 0)
    points = np.transpose([x, y])

    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) * 10, 0, 1)
    return position_weights.reshape(np.flip(bounds))

ensure_list(x)

Source code in src\util.py
def ensure_list(x) -> list:
    if x is None:
        return []
    elif isinstance(x, list):
        return x
    else:
        return [x]

eval_context(data, key, default_value, context)

Source code in src\util.py
def eval_context(data, key, default_value, context):
    value = data.get(key, default_value)
    if isinstance(value, str):
        try:
            value = value.format_map(context)
        except:
            pass
        try:
            value = eval(value, context)
        except:
            pass
    return value

export_csv(filename, data, header=None)

Source code in src\util.py
def export_csv(filename, data, header=None):
    with open(filename, 'w', encoding='utf8', newline='') as file:
        csvwriter = csv.writer(file)
        if header is not None:
            csvwriter.writerow(header)
        for row in data:
            csvwriter.writerow(row)

export_json(filename, data)

Source code in src\util.py
def export_json(filename, data):
    with open(filename, 'w', encoding='utf8') as file:
        json.dump(data, file, indent=4)

filter_dict(dict0)

Source code in src\util.py
def filter_dict(dict0: dict) -> dict:
    new_dict = {}
    for key, value0 in dict0.items():
        if value0 is not None:
            values = []
            for value in ensure_list(value0):
                if isinstance(value, dict):
                    value = filter_dict(value)
                values.append(value)
            if len(values) == 1:
                values = values[0]
            new_dict[key] = values
    return new_dict

filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5)

Source code in src\util.py
def filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5):
    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) / filter_factor, 0, 1)
    order_weights = 1 - np.array(range(len(points))) / len(points) / 2
    weights = position_weights * order_weights
    return weights > threshold
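
A small worked example (hand-computed): points near the image border and late in the list receive lower weights and are filtered out.

points = np.array([[50, 50], [2, 50], [50, 98]])
filter_edge_points(points, bounds=(100, 100))   # -> [True, False, False]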

find_all_numbers(text)

Source code in src\util.py
def find_all_numbers(text: str) -> list:
    return list(map(int, re.findall(r'\d+', text)))

get_center(data, offset=(0, 0))

Source code in src\util.py
def get_center(data, offset=(0, 0)):
    moments = get_moments(data, offset=offset)
    if moments['m00'] != 0:
        center = get_moments_center(moments)
    else:
        center = np.mean(data, 0).flatten()  # close approximation
    return center.astype(np.float32)

get_center_from_transform(transform)

Source code in src\util.py
def get_center_from_transform(transform):
    # from opencv:
    # t0 = (1-alpha) * cx - beta * cy
    # t1 = beta * cx + (1-alpha) * cy
    # where
    # alpha = cos(angle) * scale
    # beta = sin(angle) * scale
    # isolate cx and cy:
    t0, t1 = transform[:2, 2]
    scale = 1
    angle = np.arctan2(transform[0][1], transform[0][0])
    alpha = np.cos(angle) * scale
    beta = np.sin(angle) * scale
    cx = (t1 + t0 * (1 - alpha) / beta) / (beta + (1 - alpha) ** 2 / beta)
    cy = ((1 - alpha) * cx - t0) / beta
    return cx, cy

get_default(x, default)

Source code in src\util.py
def get_default(x, default):
    return default if x is None else x

get_filetitle(filename)

Source code in src\util.py
def get_filetitle(filename: str) -> str:
    filebase = os.path.basename(filename)
    title = os.path.splitext(filebase)[0]
    # str.rstrip('.ome') would strip any trailing '.', 'o', 'm', 'e' characters;
    # remove the '.ome' suffix explicitly instead
    if title.endswith('.ome'):
        title = title[:-len('.ome')]
    return title

get_mean_nn_distance(points1, points2)

Source code in src\util.py
def get_mean_nn_distance(points1, points2):
    return np.mean([get_nn_distance(points1), get_nn_distance(points2)])

get_moments(data, offset=(0, 0))

Source code in src\util.py
def get_moments(data, offset=(0, 0)):
    moments = cv.moments((np.array(data) + offset).astype(np.float32))    # doesn't work for float64!
    return moments

get_moments_center(moments, offset=(0, 0))

Source code in src\util.py
def get_moments_center(moments, offset=(0, 0)):
    return np.array([moments['m10'], moments['m01']]) / moments['m00'] + np.array(offset)

get_nn_distance(points0)

Source code in src\util.py
def get_nn_distance(points0):
    points = list(set(map(tuple, points0)))     # get unique points
    if len(points) >= 2:
        tree = KDTree(points, leaf_size=2)
        dist, ind = tree.query(points, k=2)
        nn_distance = np.median(dist[:, 1])
    else:
        nn_distance = 1
    return nn_distance

get_orthogonal_pairs(origins, image_size_um)

Get pairs of orthogonal neighbors from a list of tiles. Tiles don't have to be placed on a regular grid.

Source code in src\util.py
def get_orthogonal_pairs(origins, image_size_um):
    """
    Get pairs of orthogonal neighbors from a list of tiles.
    Tiles don't have to be placed on a regular grid.
    """
    pairs = []
    angles = []
    z_positions = [pos[0] for pos in origins if len(pos) == 3]
    ordered_z = sorted(set(z_positions))
    is_mixed_3dstack = len(ordered_z) < len(z_positions)
    for i, j in np.transpose(np.triu_indices(len(origins), 1)):
        origini = np.array(origins[i])
        originj = np.array(origins[j])
        if is_mixed_3dstack:
            # ignore z value for distance
            distance = math.dist(origini[-2:], originj[-2:])
            min_distance = max(image_size_um[-2:])
            z_i, z_j = origini[0], originj[0]
            is_same_z = (z_i == z_j)
            is_close_z = abs(ordered_z.index(z_i) - ordered_z.index(z_j)) <= 1
            if not is_same_z:
                # for tiles in different z stack, require greater overlap
                min_distance *= 0.8
            ok = (distance < min_distance and is_close_z)
        else:
            distance = math.dist(origini, originj)
            min_distance = max(image_size_um)
            ok = (distance < min_distance)
        if ok:
            pairs.append((i, j))
            vector = origini - originj
            angle = math.degrees(math.atan2(vector[1], vector[0]))
            if distance < min(image_size_um):
                angle += 90
            while angle < -90:
                angle += 180
            while angle > 90:
                angle -= 180
            angles.append(angle)
    return pairs, angles
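
A usage sketch with hypothetical tile origins (in micrometer): two tiles are paired when their distance is below the largest image dimension, so the diagonal pair is excluded here.

origins = [(0, 0), (900, 0), (0, 900)]
pairs, angles = get_orthogonal_pairs(origins, image_size_um=(1000, 1000))
# pairs == [(0, 1), (0, 2)]; angles == [90.0, 0.0] (neighbor directions in degrees)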

get_rotation_from_transform(transform)

Source code in src\util.py
def get_rotation_from_transform(transform):
    rotation = np.rad2deg(np.arctan2(transform[0][1], transform[0][0]))
    return rotation

get_scale_from_transform(transform)

Source code in src\util.py
def get_scale_from_transform(transform):
    scale = np.mean(np.linalg.norm(transform, axis=0)[:-1])
    return scale

get_translation_from_transform(transform)

Source code in src\util.py
def get_translation_from_transform(transform):
    ndim = len(transform) - 1
    #translation = transform[:ndim, ndim]
    translation = apply_transform([[0] * ndim], transform)[0]
    return translation

get_unique_file_labels(filenames)

Source code in src\util.py
def get_unique_file_labels(filenames: list) -> list:
    file_labels = []
    file_parts = []
    label_indices = set()
    last_parts = None
    for filename in filenames:
        parts = split_numeric(filename)
        if len(parts) == 0:
            # fall back to the whole filename as a single part
            parts = [filename]
        file_parts.append(parts)
        if last_parts is not None:
            for parti, (part1, part2) in enumerate(zip(last_parts, parts)):
                if part1 != part2:
                    label_indices.add(parti)
        last_parts = parts
    label_indices = sorted(list(label_indices))

    for file_part in file_parts:
        file_label = '_'.join([file_part[i] for i in label_indices])
        file_labels.append(file_label)

    if len(set(file_labels)) < len(file_labels):
        # fallback for duplicate labels
        file_labels = [get_filetitle(filename) for filename in filenames]

    return file_labels
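
For example (hypothetical filenames): only the numeric parts that differ between files end up in the labels.

filenames = ['scan_S001_z01.tif', 'scan_S001_z02.tif', 'scan_S002_z01.tif']
get_unique_file_labels(filenames)   # -> ['S001_z01', 'S001_z02', 'S002_z01']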

get_value_units_micrometer(value_units0)

Source code in src\util.py
def get_value_units_micrometer(value_units0: list|dict) -> list|dict|None:
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    if value_units0 is None:
        return None

    if isinstance(value_units0, dict):
        values_um = {}
        for dim, value_unit in value_units0.items():
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um[dim] = value_um
    else:
        values_um = []
        for value_unit in value_units0:
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um.append(value_um)
    return values_um

import_csv(filename)

Source code in src\util.py
def import_csv(filename):
    with open(filename, encoding='utf8', newline='') as file:
        # materialise the rows before the file is closed; csv.reader is lazy
        data = list(csv.reader(file))
    return data

import_json(filename)

Source code in src\util.py
def import_json(filename):
    with open(filename, encoding='utf8') as file:
        data = json.load(file)
    return data

import_metadata(content, fields=None, input_path=None)

Source code in src\util.py
def import_metadata(content, fields=None, input_path=None):
    # return dict[id] = {values}
    if isinstance(content, str):
        ext = os.path.splitext(content)[1].lower()
        if input_path:
            if isinstance(input_path, list):
                input_path = input_path[0]
            content = os.path.normpath(os.path.join(os.path.dirname(input_path), content))
        if ext == '.csv':
            content = import_csv(content)
        elif ext in ['.json', '.ome.json']:
            content = import_json(content)
    if fields is not None:
        content = [[data[field] for field in fields] for data in content]
    return content

load_tiff(filename)

Source code in src\image\ome_tiff_helper.py
def load_tiff(filename):
    return tifffile.imread(filename)

normalise_rotated_positions(positions0, rotations0, size, center)

Source code in src\util.py
def normalise_rotated_positions(positions0, rotations0, size, center):
    # in [xy(z)]
    positions = []
    rotations = []
    _, angles = get_orthogonal_pairs(positions0, size)
    for position0, rotation in zip(positions0, rotations0):
        if rotation is None and len(angles) > 0:
            rotation = -np.mean(angles)
        angle = -rotation if rotation is not None else None
        transform = create_transform(center=center, angle=angle, matrix_size=4)
        position = apply_transform([position0], transform)[0]
        positions.append(position)
        rotations.append(rotation)
    return positions, rotations

normalise_rotation(rotation)

Normalise rotation to be in the range [-180, 180].

Source code in src\util.py
def normalise_rotation(rotation):
    """
    Normalise rotation to be in the range [-180, 180].
    """
    while rotation < -180:
        rotation += 360
    while rotation > 180:
        rotation -= 360
    return rotation
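
For example:

normalise_rotation(270)    # -90
normalise_rotation(-450)   # -90
normalise_rotation(180)    # 180 (boundary values are kept)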

points_to_3d(points)

Source code in src\util.py
def points_to_3d(points):
    return [list(point) + [0] for point in points]

print_dict(dct, indent=0)

Source code in src\util.py
def print_dict(dct: dict, indent: int = 0) -> str:
    s = ''
    if isinstance(dct, dict):
        for key, value in dct.items():
            s += '\n'
            if not isinstance(value, list):
                s += '\t' * indent + str(key) + ': '
            if isinstance(value, dict):
                s += print_dict(value, indent=indent + 1)
            elif isinstance(value, list):
                for v in value:
                    s += print_dict(v)
            else:
                s += str(value)
    else:
        s += str(dct)
    return s

print_hbytes(nbytes)

Source code in src\util.py
def print_hbytes(nbytes: int) -> str:
    exps = ['', 'K', 'M', 'G', 'T', 'P', 'E']
    div = 1024
    exp = 0

    while nbytes > div:
        nbytes /= div
        exp += 1
    if exp < len(exps):
        e = exps[exp]
    else:
        e = f'e{exp * 3}'
    return f'{nbytes:.1f}{e}B'
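
For example:

print_hbytes(1023)            # '1023.0B'
print_hbytes(1536)            # '1.5KB'
print_hbytes(3 * 1024 ** 3)   # '3.0GB'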

reorder(items, old_order, new_order, default_value=0)

Source code in src\util.py
def reorder(items: list, old_order: str, new_order: str, default_value: int = 0) -> list:
    new_items = []
    for label in new_order:
        if label in old_order:
            item = items[old_order.index(label)]
        else:
            item = default_value
        new_items.append(item)
    return new_items
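
For example, converting a shape from cyx order to xyzc order, filling the missing z dimension with the default value:

reorder([3, 512, 512], 'cyx', 'xyzc')   # -> [512, 512, 0, 3]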

retuple(chunks, shape)

Expand chunks to match shape.

E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028) return (3, 4, 5, 64, 64)

If chunks is an integer, it is applied to all dimensions, to match the behaviour of zarr-python.

Source code in src\util.py
def retuple(chunks, shape):
    # from ome-zarr-py
    """
    Expand chunks to match shape.

    E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028)
    return (3, 4, 5, 64, 64)

    If chunks is an integer, it is applied to all dimensions, to match
    the behaviour of zarr-python.
    """

    if isinstance(chunks, int):
        return tuple([chunks] * len(shape))

    dims_to_add = len(shape) - len(chunks)
    return *shape[:dims_to_add], *chunks
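
The docstring example and the integer case, illustrated:

retuple((64, 64), (3, 4, 5, 1028, 1028))   # -> (3, 4, 5, 64, 64)
retuple(64, (5, 1028, 1028))               # -> (64, 64, 64)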

round_significants(a, significant_digits)

Source code in src\util.py
def round_significants(a: float, significant_digits: int) -> float:
    if a != 0:
        round_decimals = significant_digits - int(np.floor(np.log10(abs(a)))) - 1
        return round(a, round_decimals)
    return a

save_ome_tiff(filename, data, dimension_order, pixel_size, channels=[], positions=[], rotation=None, tile_size=None, compression=None, scaler=None)

Source code in src\image\ome_tiff_helper.py
def save_ome_tiff(filename, data, dimension_order, pixel_size, channels=[], positions=[], rotation=None,
                  tile_size=None, compression=None, scaler=None):

    ome_metadata, resolution0, resolution_unit0 = create_tiff_metadata(pixel_size, dimension_order,
                                                                       channels, positions, rotation, is_ome=True)
    # maximum size (w/o compression)
    max_size = data.size * data.itemsize
    size = max_size
    if scaler is not None:
        npyramid_add = scaler.max_layer
        for i in range(npyramid_add):
            size //= (scaler.downscale ** 2)
            max_size += size
    else:
        npyramid_add = 0
    bigtiff = (max_size > 2 ** 32)

    # tile_size may be None (the default); only normalise and check it when given
    if tile_size is not None:
        tile_size = tile_size[-2:]  # assume order zyx (inversed xyz)
        shape_yx = [data.shape[dimension_order.index(dim)] for dim in 'yx']
        if np.any(np.array(tile_size) > np.array(shape_yx)):
            tile_size = None

    with TiffWriter(filename, bigtiff=bigtiff) as writer:
        for i in range(npyramid_add + 1):
            if i == 0:
                subifds = npyramid_add
                subfiletype = None
                metadata = ome_metadata
                resolution = resolution0[:2]
                resolutionunit = resolution_unit0
            else:
                subifds = None
                subfiletype = 1
                metadata = None
                resolution = None
                resolutionunit = None
                data = scaler.resize_image(data)
                data = data.rechunk()   # rechunk returns a new array; keep the result
            writer.write(data, subifds=subifds, subfiletype=subfiletype,
                         tile=tile_size, compression=compression,
                         resolution=resolution, resolutionunit=resolutionunit, metadata=metadata)

save_tiff(filename, data, dimension_order=None, pixel_size=None, tile_size=(1024, 1024), compression='LZW')

Source code in src\image\ome_tiff_helper.py
def save_tiff(filename, data, dimension_order=None, pixel_size=None, tile_size=(1024, 1024), compression='LZW'):
    _, resolution, resolution_unit = create_tiff_metadata(pixel_size, dimension_order)
    tifffile.imwrite(filename, data, tile=tile_size, compression=compression,
                     resolution=resolution, resolutionunit=resolution_unit)

split_num_text(text)

Source code in src\util.py
def split_num_text(text: str) -> list:
    num_texts = []
    block = ''
    is_num0 = None
    if text is None:
        return None

    for c in text:
        is_num = (c.isnumeric() or c == '.')
        if is_num0 is not None and is_num != is_num0:
            num_texts.append(block)
            block = ''
        block += c
        is_num0 = is_num
    if block != '':
        num_texts.append(block)

    num_texts2 = []
    for block in num_texts:
        block = block.strip()
        try:
            block = float(block)
        except:
            pass
        if block not in [' ', ',', '|']:
            num_texts2.append(block)
    return num_texts2

split_numeric(text)

Source code in src\util.py
def split_numeric(text: str) -> list:
    num_parts = []
    parts = re.split(r'[_/\\.]', text)
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            num_parts.append(part)
    return num_parts

split_numeric_dict(text)

Source code in src\util.py
def split_numeric_dict(text: str) -> dict:
    num_parts = {}
    parts = re.split(r'[_/\\.]', text)
    parti = 0
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            index = num_span.start()
            label = part[:index]
            if label == '':
                label = parti
            num_parts[label] = num_span.group()
            parti += 1
    return num_parts

split_path(path)

Source code in src\util.py
def split_path(path: str) -> list:
    return os.path.normpath(path).split(os.path.sep)

split_value_unit_list(text)

Source code in src\util.py
def split_value_unit_list(text: str) -> list:
    value_units = []
    if text is None:
        return None

    items = split_num_text(text)
    if isinstance(items[-1], str):
        def_unit = items[-1]
    else:
        def_unit = ''

    i = 0
    while i < len(items):
        value = items[i]
        if i + 1 < len(items):
            unit = items[i + 1]
        else:
            unit = ''
        if not isinstance(value, str):
            if isinstance(unit, str):
                i += 1
            else:
                unit = def_unit
            value_units.append((value, unit))
        i += 1
    return value_units

validate_transform(transform, max_rotation=None)

Source code in src\util.py
def validate_transform(transform, max_rotation=None):
    if transform is None:
        return False
    transform = np.array(transform)
    if np.any(np.isnan(transform)):
        return False
    if np.any(np.isinf(transform)):
        return False
    if np.linalg.det(transform) == 0:
        return False
    if max_rotation is not None and abs(normalise_rotation(get_rotation_from_transform(transform))) > max_rotation:
        return False
    return True

xyz_to_dict(xyz, axes='xyz')

Source code in src\util.py
def xyz_to_dict(xyz, axes='xyz'):
    dct = {dim: value for dim, value in zip(axes, xyz)}
    return dct

ome_zarr_helper

save_ome_zarr(filename, data, dimension_order, pixel_size, channels, translation, rotation, compression=None, scaler=None, zarr_version=2, ome_version='0.4')

Source code in src\image\ome_zarr_helper.py
def save_ome_zarr(filename, data, dimension_order, pixel_size, channels, translation, rotation,
                  compression=None, scaler=None, zarr_version=2, ome_version='0.4'):

    storage_options = {'dimension_separator': '/'}
    compressor, compression_filters = create_compression_filter(compression)
    if compressor is not None:
        storage_options['compressor'] = compressor
    if compression_filters is not None:
        storage_options['filters'] = compression_filters

    if 'z' not in dimension_order:
        # add Z dimension to be able to store Z position
        new_dimension_order = dimension_order.replace('yx', 'zyx')
        data = redimension_data(data, dimension_order, new_dimension_order)
        dimension_order = new_dimension_order

    axes = create_axes_metadata(dimension_order)

    if scaler is not None:
        npyramid_add = scaler.max_layer
        pyramid_downsample = scaler.downscale
    else:
        npyramid_add = 0
        pyramid_downsample = 1

    coordinate_transformations = []
    scale = 1
    for i in range(npyramid_add + 1):
        transform = create_transformation_metadata(dimension_order, pixel_size, scale, translation, rotation)
        coordinate_transformations.append(transform)
        if pyramid_downsample:
            scale /= pyramid_downsample

    if ome_version == '0.4':
        ome_zarr_format = ome_zarr.format.FormatV04()
    elif ome_version == '0.5':
        ome_zarr_format = ome_zarr.format.FormatV05()   # future support anticipated
    else:
        ome_zarr_format = ome_zarr.format.CurrentFormat()

    zarr_root = zarr.open_group(store=filename, mode="w", zarr_version=zarr_version)
    write_image(image=data, group=zarr_root, axes=axes, coordinate_transformations=coordinate_transformations,
                scaler=scaler, storage_options=storage_options, fmt=ome_zarr_format)

    # use the smallest pyramid level to compute the channel windows
    keys = list(zarr_root.array_keys())
    data_smallest = zarr_root.get(keys[-1])
    zarr_root.attrs['omero'] = create_channel_ome_metadata(data_smallest, dimension_order, channels, ome_version)

ome_zarr_util

create_axes_metadata(dimension_order)

Source code in src\image\ome_zarr_util.py
def create_axes_metadata(dimension_order):
    axes = []
    for dimension in dimension_order:
        unit1 = None
        if dimension == 't':
            type1 = 'time'
            unit1 = 'millisecond'
        elif dimension == 'c':
            type1 = 'channel'
        else:
            type1 = 'space'
            unit1 = 'micrometer'
        axis = {'name': dimension, 'type': type1}
        if unit1 is not None and unit1 != '':
            axis['unit'] = unit1
        axes.append(axis)
    return axes

create_channel_metadata(source, ome_version)

Source code in src\image\ome_zarr_util.py
def create_channel_metadata(source, ome_version):
    channels = source.get_channels()
    nchannels = source.get_nchannels()

    if len(channels) < nchannels == 3:
        labels = ['Red', 'Green', 'Blue']
        colors = [(1, 0, 0, 1), (0, 1, 0, 1), (0, 0, 1, 1)]
        channels = [{'label': label, 'color': color} for label, color in zip(labels, colors)]

    omezarr_channels = []
    for channeli, channel0 in enumerate(channels):
        channel = channel0.copy()
        color = channel.get('color', (1, 1, 1, 1))
        channel['color'] = rgba_to_hexrgb(color)
        if 'window' not in channel:
            channel['window'] = source.get_channel_window(channeli)
        omezarr_channels.append(channel)

    metadata = {
        'version': ome_version,
        'channels': omezarr_channels,
    }
    return metadata

create_channel_ome_metadata(data, dimension_order, channels, ome_version)

Source code in src\image\ome_zarr_util.py
def create_channel_ome_metadata(data, dimension_order, channels, ome_version):
    if 'c' in dimension_order:
        nchannels = data.shape[dimension_order.index('c')]
    else:
        nchannels = 1
    if channels is None or len(channels) < nchannels:
        if nchannels == 3:
            labels = ['Red', 'Green', 'Blue']
            colors = [(1, 0, 0, 1), (0, 1, 0, 1), (0, 0, 1, 1)]
            channels = [{'label': label, 'color': color} for label, color in zip(labels, colors)]
        else:
            channels = [{'label': f'Channel {channeli}'} for channeli in range(nchannels)]

    omezarr_channels = []
    for channeli, channel0 in enumerate(channels):
        channel = channel0.copy()
        color = channel.get('color', (1, 1, 1, 1))
        channel['color'] = rgba_to_hexrgb(color)
        if 'window' not in channel:
            channel['window'] = get_channel_window(data, dimension_order, channeli)
        omezarr_channels.append(channel)

    metadata = {
        'version': ome_version,
        'channels': omezarr_channels,
    }
    return metadata

create_transformation_metadata(dimension_order, pixel_size_um, scale, translation_um=[], rotation=None)

Source code in src\image\ome_zarr_util.py
def create_transformation_metadata(dimension_order, pixel_size_um, scale, translation_um=[], rotation=None):
    metadata = []
    pixel_size_scale = []
    translation_scale = []
    for dimension in dimension_order:
        if dimension == 'z' and len(pixel_size_um) > 2:
            pixel_size_scale1 = pixel_size_um[2]
        elif dimension == 'y' and len(pixel_size_um) > 1:
            pixel_size_scale1 = pixel_size_um[1] / scale
        elif dimension == 'x' and len(pixel_size_um) > 0:
            pixel_size_scale1 = pixel_size_um[0] / scale
        else:
            pixel_size_scale1 = 1
        if pixel_size_scale1 == 0:
            pixel_size_scale1 = 1
        pixel_size_scale.append(pixel_size_scale1)

        if dimension == 'z' and len(translation_um) > 2:
            translation1 = translation_um[2]
        elif dimension == 'y' and len(translation_um) > 1:
            translation1 = translation_um[1] * scale
        elif dimension == 'x' and len(translation_um) > 0:
            translation1 = translation_um[0] * scale
        else:
            translation1 = 0
        translation_scale.append(translation1)

    metadata.append({'type': 'scale', 'scale': pixel_size_scale})
    if not all(v == 0 for v in translation_scale):
        metadata.append({'type': 'translation', 'translation': translation_scale})
    # Supported in ome-zarr V0.6
    #if rotation is not None:
    #    metadata.append({'type': 'rotation', 'rotation': rotation})
    return metadata
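
A sketch with hypothetical values: x/y pixel sizes are divided by the pyramid scale while z is kept, and translations are scaled the other way.

create_transformation_metadata('zyx', pixel_size_um=[0.1, 0.1, 1.0], scale=0.5,
                               translation_um=[10, 20, 0])
# -> [{'type': 'scale', 'scale': [1.0, 0.2, 0.2]},
#     {'type': 'translation', 'translation': [0, 10.0, 5.0]}]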

get_channel_window(data, dimension_order, channeli)

Source code in src\image\ome_zarr_util.py
def get_channel_window(data, dimension_order, channeli):
    min_quantile = 0.001
    max_quantile = 0.999

    if data.dtype.kind == 'f':
        #info = np.finfo(dtype)
        start, end = 0, 1
    else:
        info = np.iinfo(data.dtype)
        start, end = info.min, info.max

    if 'c' in dimension_order:
        data = np.take(data, channeli, axis=dimension_order.index('c'))
    # avoid shadowing the built-in min/max names
    qmin, qmax = get_image_quantile(data, min_quantile), get_image_quantile(data, max_quantile)
    window = {'start': start, 'end': end, 'min': qmin, 'max': qmax}
    return window

hexrgb_to_rgba(hexrgb)

Source code in src\image\color_conversion.py
def hexrgb_to_rgba(hexrgb: str) -> list:
    # parse the hex string directly instead of eval'ing it
    rgba = int_to_rgba(int(hexrgb + 'FF', 16))
    return rgba

int_to_rgba(intrgba)

Source code in src\image\color_conversion.py
def int_to_rgba(intrgba: int) -> list:
    signed = (intrgba < 0)
    rgba = [x / 255 for x in intrgba.to_bytes(4, signed=signed, byteorder="big")]
    if rgba[-1] == 0:
        rgba[-1] = 1
    return rgba

rgba_to_hexrgb(rgba)

Source code in src\image\color_conversion.py
def rgba_to_hexrgb(rgba: tuple|list) -> str:
    hexrgb = ''.join([hex(int(x * 255))[2:].upper().zfill(2) for x in rgba[:3]])
    return hexrgb

rgba_to_int(rgba)

Source code in src\image\color_conversion.py
def rgba_to_int(rgba: tuple|list) -> int:
    intrgba = int.from_bytes([int(x * 255) for x in rgba], signed=True, byteorder="big")
    return intrgba

scale_dimensions_dict(shape0, scale)

Source code in src\image\ome_zarr_util.py
def scale_dimensions_dict(shape0, scale):
    shape = {}
    if scale == 1:
        return shape0
    for dimension, shape1 in shape0.items():
        if dimension[0] in ['x', 'y']:
            shape1 = int(shape1 * scale)
        shape[dimension] = shape1
    return shape

scale_dimensions_xy(shape0, dimension_order, scale)

Source code in src\image\ome_zarr_util.py
def scale_dimensions_xy(shape0, dimension_order, scale):
    shape = []
    if scale == 1:
        return shape0
    for shape1, dimension in zip(shape0, dimension_order):
        if dimension[0] in ['x', 'y']:
            shape1 = int(shape1 * scale)
        shape.append(shape1)
    return shape

reg_util

aligned_path = 'D:/slides/12193/aligned_hpc/mappings.json' module-attribute

aligned_transforms = import_json(aligned_path) module-attribute

output_path = 'aligned_stitched_mappings1.json' module-attribute

stitched_filenames = dir_regex(stitched_path) module-attribute

stitched_path = 'D:/slides/12193/stitched_hpc/S???/mappings.json' module-attribute

stitched_transforms = {('S' + split_numeric_dict(filename)['S']): import_json(filename) for filename in stitched_filenames} module-attribute

transforms2 = make_z_transforms(get_composite_transforms(stitched_transforms, aligned_transforms), to3d=True) module-attribute

get_composite_transforms(transforms, global_transforms)

Source code in src\image\reg_util.py
def get_composite_transforms(transforms, global_transforms):
    transforms2 = {}
    for key, tile_transforms in transforms.items():
        global_transform = global_transforms[key]
        tile_transforms2 = {}
        for tile_key, transform in tile_transforms.items():
            tile_transforms2[tile_key] = combine_transforms([transform, global_transform]).tolist()
        transforms2[key] = tile_transforms2
    return transforms2

make_z_transforms(transforms, to3d=False)

Source code in src\image\reg_util.py
def make_z_transforms(transforms, to3d=False):
    transforms2 = {}
    for key, transforms1 in transforms.items():
        for key2, transform in transforms1.items():
            if len(transform) == 3 and to3d:
                transform2 = np.eye(4)
                transform2[1:, 1:] = transform
                transform = transform2.tolist()
            transforms2[f'{key}_{key2}'] = transform
    return transforms2

source_helper

create_dask_data(filename, level=0)

Source code in src\image\source_helper.py
def create_dask_data(filename, level=0):
    ext = os.path.splitext(filename)[1]
    if 'zar' in ext:
        group = zarr.open_group(filename, mode='r')
        # using group.attrs to get multiscales is recommended by cgohlke
        paths = group.attrs['multiscales'][0]['datasets']
        path0 = paths[level]['path']
        dask_data = da.from_zarr(os.path.join(filename, path0))
    elif 'tif' in ext:
        with TiffFile(filename) as tif:
            series0 = tif.series[0]
            shape = series0.shape
            dtype = series0.dtype
        lazy_array = dask.delayed(tifffile.imread)(filename, level=level)
        dask_data = da.from_delayed(lazy_array, shape=shape, dtype=dtype)
    else:
        # read shape and dtype up front without loading the pixel data
        props = imageio.v3.improps(filename)
        lazy_array = dask.delayed(imageio.v3.imread)(filename)
        # TODO get metadata from metadata = PIL.Image.info
        dask_data = da.from_delayed(lazy_array, shape=props.shape, dtype=props.dtype)
    return dask_data

create_dask_source(filename, source_metadata=None)

Source code in src\image\source_helper.py
def create_dask_source(filename, source_metadata=None):
    ext = os.path.splitext(filename)[1].lstrip('.').lower()
    if ext.startswith('tif'):
        dask_source = TiffDaskSource(filename, source_metadata)
    elif '.zar' in filename.lower():
        dask_source = ZarrDaskSource(filename, source_metadata)
    else:
        raise ValueError(f'Unsupported file type: {ext}')
    return dask_source

get_images_metadata(filenames, source_metadata=None)

Source code in src\image\source_helper.py
def get_images_metadata(filenames, source_metadata=None):
    summary = 'Filename\tPixel size\tSize\tPosition\tRotation\n'
    sizes = []
    centers = []
    rotations = []
    positions = []
    max_positions = []
    pixel_sizes = []
    for filename in filenames:
        source = create_dask_source(filename, source_metadata)
        pixel_size = dict_to_xyz(source.get_pixel_size())
        size = dict_to_xyz(source.get_physical_size())
        sizes.append(size)
        position = dict_to_xyz(source.get_position())
        rotation = source.get_rotation()
        rotations.append(rotation)

        summary += (f'{get_filetitle(filename)}'
                    f'\t{tuple(pixel_size)}'
                    f'\t{tuple(size)}'
                    f'\t{tuple(position)}')
        if rotation is not None:
            summary += f'\t{rotation}'
        summary += '\n'

        if len(size) < len(position):
            size = list(size) + [0]
        center = np.array(position) + np.array(size) / 2
        pixel_sizes.append(pixel_size)
        centers.append(center)
        positions.append(position)
        max_positions.append(np.array(position) + np.array(size))
    pixel_size = np.mean(pixel_sizes, 0)
    center = np.mean(centers, 0)
    area = np.max(max_positions, 0) - np.min(positions, 0)
    summary += f'Area: {tuple(area)} Center: {tuple(center)}\n'

    rotations2 = []
    for rotation, size in zip(rotations, sizes):
        if rotation is None:
            _, angles = get_orthogonal_pairs(centers, size)
            if len(angles) > 0:
                rotation = -np.mean(angles)
                rotations2.append(rotation)
    if len(rotations2) > 0:
        rotation = np.mean(rotations2)
    else:
        rotation = None
    return {'pixel_size': pixel_size,
            'center': center,
            'area': area,
            'rotation': rotation,
            'summary': summary}

util

apply_transform(points, transform)

Source code in src\util.py
def apply_transform(points, transform):
    new_points = []
    for point in points:
        point_len = len(point)
        while len(point) < len(transform):
            point = list(point) + [1]
        new_point = np.dot(point, np.transpose(np.array(transform)))
        new_points.append(new_point[:point_len])
    return new_points

blur_image(image, sigma)

Source code in src\image\util.py
def blur_image(image, sigma):
    nchannels = image.shape[2] if image.ndim == 3 else 1
    if nchannels not in [1, 3]:
        new_image = np.zeros_like(image)
        for channeli in range(nchannels):
            new_image[..., channeli] = blur_image_single(image[..., channeli], sigma)
    else:
        new_image = blur_image_single(image, sigma)
    return new_image

blur_image_single(image, sigma)

Source code in src\image\util.py
def blur_image_single(image, sigma):
    return gaussian_filter(image, sigma)

calc_foreground_map(sims)

Source code in src\image\util.py
def calc_foreground_map(sims):
    if len(sims) <= 2:
        return [True] * len(sims)
    sims = [sim.squeeze().astype(np.float32) for sim in sims]
    median_image = calc_images_median(sims).astype(np.float32)
    difs = [np.mean(np.abs(sim - median_image), (0, 1)) for sim in sims]
    # or use stddev instead of mean?
    threshold = np.mean(difs, 0)
    #threshold, _ = cv.threshold(np.array(difs).astype(np.uint16), 0, 1, cv.THRESH_OTSU)
    #threshold, foregrounds = filter_noise_images(channel_images)
    foreground_map = np.array(difs) > threshold
    if not np.any(foreground_map):
        return [True] * len(sims)
    return foreground_map

calc_images_median(images)

Source code in src\image\util.py
def calc_images_median(images):
    out_image = np.zeros(shape=images[0].shape, dtype=images[0].dtype)
    median_image = np.median(images, 0, out_image)
    return median_image

calc_images_quantiles(images, quantiles)

Source code in src\image\util.py
def calc_images_quantiles(images, quantiles):
    quantile_images = [image.astype(np.float32) for image in np.quantile(images, quantiles, 0)]
    return quantile_images

calc_output_properties(sims, transform_key, z_scale=None)

Source code in src\image\util.py
def calc_output_properties(sims, transform_key, z_scale=None):
    output_spacing = si_utils.get_spacing_from_sim(sims[0])
    if z_scale is not None:
        output_spacing['z'] = z_scale
    output_properties = fusion.calc_fusion_stack_properties(
        sims,
        [si_utils.get_affine_from_sim(sim, transform_key) for sim in sims],
        output_spacing,
        mode='union',
    )
    return output_properties

calc_pyramid(xyzct, npyramid_add=0, pyramid_downsample=2, volumetric_resize=False)

Source code in src\image\util.py
def calc_pyramid(xyzct: tuple, npyramid_add: int = 0, pyramid_downsample: float = 2,
                 volumetric_resize: bool = False) -> list:
    x, y, z, c, t = xyzct
    if volumetric_resize and z > 1:
        size = (x, y, z)
    else:
        size = (x, y)
    sizes_add = []
    scale = 1
    for _ in range(npyramid_add):
        scale /= pyramid_downsample
        scaled_size = np.maximum(np.round(np.multiply(size, scale)).astype(int), 1)
        sizes_add.append(scaled_size)
    return sizes_add

check_round_significants(a, significant_digits)

Source code in src\util.py
def check_round_significants(a: float, significant_digits: int) -> float:
    rounded = round_significants(a, significant_digits)
    if a != 0:
        dif = 1 - rounded / a
    else:
        dif = rounded - a
    if abs(dif) < 10 ** -significant_digits:
        return rounded
    return a

color_image(image)

Source code in src\image\util.py
def color_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 1:
        return cv.cvtColor(np.array(image), cv.COLOR_GRAY2RGB)
    else:
        return image

combine_transforms(transforms)

Source code in src\image\util.py
def combine_transforms(transforms):
    combined_transform = None
    for transform in transforms:
        if combined_transform is None:
            combined_transform = transform
        else:
            combined_transform = np.dot(transform, combined_transform)
    return combined_transform
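
Note the composition order: later transforms are left-multiplied, so the first transform in the list is applied first. A hypothetical example:

t1 = np.array([[1, 0, 10], [0, 1, 0], [0, 0, 1]])   # translate x by 10
t2 = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])   # rotate 90 degrees
combined = combine_transforms([t1, t2])             # equals t2 @ t1: translate, then rotate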

convert_image_sign_type(image, target_dtype)

Source code in src\image\util.py
def convert_image_sign_type(image: np.ndarray, target_dtype: np.dtype) -> np.ndarray:
    source_dtype = image.dtype
    if source_dtype.kind == target_dtype.kind:
        new_image = image
    elif source_dtype.kind == 'i':
        new_image = ensure_unsigned_image(image)
    else:
        # conversion without overhead
        offset = 2 ** (8 * target_dtype.itemsize - 1)
        new_image = (image - offset).astype(target_dtype)
    return new_image

convert_rational_value(value)

Source code in src\util.py
def convert_rational_value(value) -> float:
    if value is not None and isinstance(value, tuple):
        if value[0] == value[1]:
            value = value[0]
        else:
            value = value[0] / value[1]
    return value

convert_to_um(value, unit)

Source code in src\util.py
def convert_to_um(value, unit):
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    return value * conversions.get(unit, 1)

create_compression_filter(compression)

Source code in src\image\util.py
def create_compression_filter(compression: list) -> tuple:
    compressor, compression_filters = None, None
    compression = ensure_list(compression)
    if compression is not None and len(compression) > 0:
        compression_type = compression[0].lower()
        if len(compression) > 1:
            level = int(compression[1])
        else:
            level = None
        if 'lzw' in compression_type:
            from imagecodecs.numcodecs import Lzw
            compression_filters = [Lzw()]
        elif '2k' in compression_type or '2000' in compression_type:
            from imagecodecs.numcodecs import Jpeg2k
            compression_filters = [Jpeg2k(level=level)]
        elif 'jpegls' in compression_type:
            from imagecodecs.numcodecs import Jpegls
            compression_filters = [Jpegls(level=level)]
        elif 'jpegxr' in compression_type:
            from imagecodecs.numcodecs import Jpegxr
            compression_filters = [Jpegxr(level=level)]
        elif 'jpegxl' in compression_type:
            from imagecodecs.numcodecs import Jpegxl
            compression_filters = [Jpegxl(level=level)]
        else:
            compressor = compression
    return compressor, compression_filters
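
A usage sketch (assuming the optional imagecodecs package is installed): codecs are passed as a name plus optional level, and unrecognised names are returned as the compressor unchanged.

compressor, filters = create_compression_filter(['jpeg2000', 95])
# compressor is None; filters == [Jpeg2k(level=95)]
compressor, filters = create_compression_filter('blosc')
# compressor == ['blosc']; filters is None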

create_transform(center, angle, matrix_size=3)

Source code in src\util.py
def create_transform(center, angle, matrix_size=3):
    if isinstance(center, dict):
        center = dict_to_xyz(center)
    if len(center) == 2:
        center = np.array(list(center) + [0])
    if angle is None:
        angle = 0
    r = Rotation.from_euler('z', angle, degrees=True)
    t = center - r.apply(center, inverse=True)
    transform = np.eye(matrix_size)
    transform[:3, :3] = np.transpose(r.as_matrix())
    transform[:3, -1] += t
    return transform

create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0))

Source code in src\util.py
def create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0)):
    transform = cv.getRotationMatrix2D(center[:2], angle, scale)
    transform[:, 2] += translate
    if len(transform) == 2:
        transform = np.vstack([transform, [0, 0, 1]])   # create 3x3 matrix
    return transform

desc_to_dict(desc)

Source code in src\util.py
def desc_to_dict(desc: str) -> dict:
    desc_dict = {}
    if desc.startswith('{'):
        try:
            metadata = ast.literal_eval(desc)
            return metadata
        except:
            pass
    for item in re.split(r'[\r\n\t|]', desc):
        item_sep = '='
        if ':' in item:
            item_sep = ':'
        if item_sep in item:
            items = item.split(item_sep)
            key = items[0].strip()
            value = items[1].strip()
            for dtype in (int, float, bool):
                try:
                    value = dtype(value)
                    break
                except:
                    pass
            desc_dict[key] = value
    return desc_dict

detect_area_points(image)

Source code in src\image\util.py
def detect_area_points(image):
    method = cv.THRESH_OTSU
    threshold = -5
    contours = []
    while len(contours) <= 1 and threshold <= 255:
        _, binimage = cv.threshold(np.array(uint8_image(image)), threshold, 255, method)
        contours0 = cv.findContours(binimage, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        contours = contours0[0] if len(contours0) == 2 else contours0[1]
        method = cv.THRESH_BINARY
        threshold += 5
    area_contours = [(contour, cv.contourArea(contour)) for contour in contours]
    area_contours.sort(key=lambda contour_area: contour_area[1], reverse=True)
    min_area = max(np.mean([area for contour, area in area_contours]), 1)
    area_points = [(get_center(contour), area) for contour, area in area_contours if area > min_area]

    #image = cv.cvtColor(image, cv.COLOR_GRAY2BGR)
    #for point in area_points:
    #    radius = int(np.round(np.sqrt(point[1]/np.pi)))
    #    cv.circle(image, tuple(np.round(point[0]).astype(int)), radius, (255, 0, 0), -1)
    #show_image(image)
    return area_points

dict_to_xyz(dct, keys='xyz')

Source code in src\util.py
def dict_to_xyz(dct, keys='xyz'):
    return [dct[key] for key in keys if key in dct]

dir_regex(pattern)

Source code in src\util.py
def dir_regex(pattern):
    files = []
    for pattern_item in ensure_list(pattern):
        files.extend(glob.glob(pattern_item, recursive=True))
    files_sorted = sorted(files, key=lambda file: find_all_numbers(get_filetitle(file)))
    return files_sorted

draw_edge_filter(bounds)

Source code in src\util.py
def draw_edge_filter(bounds):
    out_image = np.zeros(np.flip(bounds))
    y, x = np.where(out_image == 0)
    points = np.transpose([x, y])

    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) * 10, 0, 1)
    return position_weights.reshape(np.flip(bounds))

draw_keypoints(image, points, color=(255, 0, 0))

Source code in src\image\util.py
def draw_keypoints(image, points, color=(255, 0, 0)):
    out_image = color_image(float2int_image(image))
    for point in points:
        point = np.round(point).astype(int)
        cv.drawMarker(out_image, tuple(point), color=color, markerType=cv.MARKER_CROSS, markerSize=5, thickness=1)
    return out_image

draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None, points_color='black', match_color='red', inlier_color='lime', show_plot=True, output_filename=None)

Source code in src\image\util.py
def draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None,
                           points_color='black', match_color='red', inlier_color='lime',
                           show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape = np.max([image.shape for image in [image1, image2]], axis=0)
    shape_y, shape_x = shape[:2]
    if shape_x > 2 * shape_y:
        merge_axis = 0
        offset2 = [shape_y, 0]
    else:
        merge_axis = 1
        offset2 = [0, shape_x]
    image = np.concatenate([
        np.pad(image1, ((0, shape[0] - image1.shape[0]), (0, shape[1] - image1.shape[1]))),
        np.pad(image2, ((0, shape[0] - image2.shape[0]), (0, shape[1] - image2.shape[1])))
    ], axis=merge_axis)
    ax.imshow(image, cmap='gray')

    ax.scatter(
        points1[:, 1],
        points1[:, 0],
        facecolors='none',
        edgecolors=points_color,
    )
    ax.scatter(
        points2[:, 1] + offset2[1],
        points2[:, 0] + offset2[0],
        facecolors='none',
        edgecolors=points_color,
    )

    # matches and inliers default to None; guard before iterating
    if matches is None:
        matches = []
    if inliers is None:
        inliers = []
    for i, match in enumerate(matches):
        color = match_color
        if i < len(inliers) and inliers[i]:
            color = inlier_color
        index1, index2 = match
        ax.plot(
            (points1[index1, 1], points2[index2, 1] + offset2[1]),
            (points1[index1, 0], points2[index2, 0] + offset2[0]),
            '-', linewidth=1, alpha=0.5, color=color,
        )

    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

    return fig, ax

draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None, color=(255, 0, 0), inlier_color=(0, 255, 0), radius=15, thickness=2)

Source code in src\image\util.py
def draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None,
                              color=(255, 0, 0), inlier_color=(0, 255, 0), radius = 15, thickness = 2):
    # based on https://gist.github.com/woolpeeker/d7e1821e1b5c556b32aafe10b7a1b7e8
    image1 = uint8_image(image1)
    image2 = uint8_image(image2)
    # We're drawing them side by side.  Get dimensions accordingly.
    new_shape = (max(image1.shape[0], image2.shape[0]), image1.shape[1] + image2.shape[1], 3)
    out_image = np.zeros(new_shape, image1.dtype)
    # Place images onto the new image.
    out_image[0:image1.shape[0], 0:image1.shape[1]] = color_image(image1)
    out_image[0:image2.shape[0], image1.shape[1]:image1.shape[1] + image2.shape[1]] = color_image(image2)

    if matches is not None:
        # Draw lines between matches.  Make sure to offset kp coords in second image appropriately.
        for index, match in enumerate(matches):
            if inliers is not None and inliers[index]:
                line_color = inlier_color
            else:
                line_color = color
            # So the keypoint locs are stored as a tuple of floats.  cv2.line() wants locs as a tuple of ints.
            end1 = tuple(np.round(points1[match[0]]).astype(int))
            end2 = tuple(np.round(points2[match[1]]).astype(int) + np.array([image1.shape[1], 0]))
            cv.line(out_image, end1, end2, line_color, thickness)
            cv.circle(out_image, end1, radius, line_color, thickness)
            cv.circle(out_image, end2, radius, line_color, thickness)
    else:
        # Draw all points if no matches are provided.
        for point in points1:
            point = tuple(np.round(point).astype(int))
            cv.circle(out_image, point, radius, color, thickness)
        for point in points2:
            point = tuple(np.round(point).astype(int) + np.array([image1.shape[1], 0]))
            cv.circle(out_image, point, radius, color, thickness)
    return out_image

draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None, show_plot=True, output_filename=None)

Source code in src\image\util.py
def draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None,
                              show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape_y, shape_x = image1.shape[:2]
    if shape_x > 2 * shape_y:
        alignment = 'vertical'
    else:
        alignment = 'horizontal'
    plot_matched_features(
        image1,
        image2,
        keypoints0=points1,
        keypoints1=points2,
        matches=matches,
        ax=ax,
        alignment=alignment,
        only_matches=True,
    )
    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

ensure_list(x)

Source code in src\util.py
def ensure_list(x) -> list:
    if x is None:
        return []
    elif isinstance(x, list):
        return x
    else:
        return [x]

ensure_unsigned_image(image)

Source code in src\image\util.py
def ensure_unsigned_image(image: np.ndarray) -> np.ndarray:
    source_dtype = image.dtype
    dtype = ensure_unsigned_type(source_dtype)
    if dtype != source_dtype:
        # conversion without overhead
        offset = 2 ** (8 * dtype.itemsize - 1)
        new_image = image.astype(dtype) + offset
    else:
        new_image = image
    return new_image
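
A quick illustration of the offset conversion (an illustrative sketch, assuming numpy as np): signed values are mapped onto the unsigned range with their order preserved.

image = np.array([-32768, 0, 32767], dtype=np.int16)
print(ensure_unsigned_image(image))    # [    0 32768 65535], dtype uint16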

ensure_unsigned_type(dtype)

Source code in src\image\util.py
def ensure_unsigned_type(dtype: np.dtype) -> np.dtype:
    new_dtype = dtype
    if dtype.kind == 'i' or dtype.byteorder == '>' or dtype.byteorder == '<':
        new_dtype = np.dtype(f'u{dtype.itemsize}')
    return new_dtype

eval_context(data, key, default_value, context)

Source code in src\util.py
def eval_context(data, key, default_value, context):
    value = data.get(key, default_value)
    if isinstance(value, str):
        try:
            value = value.format_map(context)
        except Exception:
            pass
        try:
            value = eval(value, context)
        except Exception:
            pass
    return value
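
A minimal usage sketch with illustrative values: a string value is first formatted against the context, then evaluated as an expression.

params = {'scale': '{factor} * 2'}
print(eval_context(params, 'scale', 1, {'factor': 3}))    # '{factor} * 2' -> '3 * 2' -> 6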

export_csv(filename, data, header=None)

Source code in src\util.py
def export_csv(filename, data, header=None):
    with open(filename, 'w', encoding='utf8', newline='') as file:
        csvwriter = csv.writer(file)
        if header is not None:
            csvwriter.writerow(header)
        for row in data:
            csvwriter.writerow(row)

export_json(filename, data)

Source code in src\util.py
def export_json(filename, data):
    with open(filename, 'w', encoding='utf8') as file:
        json.dump(data, file, indent=4)

filter_dict(dict0)

Source code in src\util.py
def filter_dict(dict0: dict) -> dict:
    new_dict = {}
    for key, value0 in dict0.items():
        if value0 is not None:
            values = []
            for value in ensure_list(value0):
                if isinstance(value, dict):
                    value = filter_dict(value)
                values.append(value)
            if len(values) == 1:
                values = values[0]
            new_dict[key] = values
    return new_dict

filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5)

Source code in src\util.py
def filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5):
    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) / filter_factor, 0, 1)
    order_weights = 1 - np.array(range(len(points))) / len(points) / 2
    weights = position_weights * order_weights
    return weights > threshold
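
An illustrative call, assuming numpy as np: points close to the image border get low position weights and are filtered out.

points = np.array([[50, 50], [5, 50], [98, 98]])
print(filter_edge_points(points, bounds=(100, 100)))    # [ True  True False] - the corner point is dropped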

filter_noise_images(images)

Source code in src\image\util.py
def filter_noise_images(images):
    dtype = images[0].dtype
    maxval = 2 ** (8 * dtype.itemsize) - 1
    image_vars = [np.asarray(np.std(image)).item() for image in images]
    threshold, mask0 = cv.threshold(np.array(image_vars).astype(dtype), 0, maxval, cv.THRESH_OTSU)
    mask = [flag.item() for flag in mask0.astype(bool)]
    return int(threshold), mask

find_all_numbers(text)

Source code in src\util.py
def find_all_numbers(text: str) -> list:
    return list(map(int, re.findall(r'\d+', text)))

float2int_image(image, target_dtype=np.dtype(np.uint8))

Source code in src\image\util.py
def float2int_image(image, target_dtype=np.dtype(np.uint8)):
    source_dtype = image.dtype
    if source_dtype.kind not in ('i', 'u') and target_dtype.kind != 'f':
        maxval = 2 ** (8 * target_dtype.itemsize) - 1
        return (image * maxval).astype(target_dtype)
    else:
        return image

get_center(data, offset=(0, 0))

Source code in src\util.py
def get_center(data, offset=(0, 0)):
    moments = get_moments(data, offset=offset)
    if moments['m00'] != 0:
        center = get_moments_center(moments)
    else:
        center = np.mean(data, 0).flatten()  # close approximation
    return center.astype(np.float32)

get_center_from_transform(transform)

Source code in src\util.py
def get_center_from_transform(transform):
    # from opencv:
    # t0 = (1-alpha) * cx - beta * cy
    # t1 = beta * cx + (1-alpha) * cy
    # where
    # alpha = cos(angle) * scale
    # beta = sin(angle) * scale
    # isolate cx and cy:
    t0, t1 = transform[:2, 2]
    scale = 1
    angle = np.arctan2(transform[0][1], transform[0][0])
    alpha = np.cos(angle) * scale
    beta = np.sin(angle) * scale
    cx = (t1 + t0 * (1 - alpha) / beta) / (beta + (1 - alpha) ** 2 / beta)
    cy = ((1 - alpha) * cx - t0) / beta
    return cx, cy
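
A round-trip sanity check (illustrative, assuming OpenCV imported as cv): build a rotation matrix about a known center, then recover that center.

transform = cv.getRotationMatrix2D((40.0, 30.0), 20, 1)
print(get_center_from_transform(transform))    # approximately (40.0, 30.0)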

get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None)

Source code in src\image\util.py
def get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None):
    if rotation is None:
        rotation = 0

    if isinstance(data, DataTree):
        sim = msi_utils.get_sim_from_msim(data)
    else:
        sim = data
    sdims = ''.join(si_utils.get_spatial_dims_from_sim(sim))
    sdims = sdims.replace('zyx', 'xyz').replace('yx', 'xy')   # order xy(z)
    origin = si_utils.get_origin_from_sim(sim)
    translation = [origin[sdim] for sdim in sdims]

    if len(translation) == 0:
        translation = [0, 0]
    if len(translation) == 2:
        if translation0 is not None and len(translation0) == 3:
            z = translation0[2]
        else:
            z = 0
        translation = list(translation) + [z]

    if transform is not None:
        translation1, rotation1, _ = get_properties_from_transform(transform, invert=True)
        translation = np.array(translation) + translation1
        rotation += rotation1

    if transform_key is not None:
        transform1 = sim.transforms[transform_key]
        translation1, rotation1, _ = get_properties_from_transform(transform1, invert=True)
        rotation += rotation1

    return translation, rotation

get_default(x, default)

Source code in src\util.py
def get_default(x, default):
    return default if x is None else x

get_filetitle(filename)

Source code in src\util.py
def get_filetitle(filename: str) -> str:
    filebase = os.path.basename(filename)
    title = os.path.splitext(filebase)[0].rstrip('.ome')
    return title

get_image_quantile(image, quantile, axis=None)

Source code in src\image\util.py
def get_image_quantile(image: np.ndarray, quantile: float, axis=None) -> float:
    value = np.quantile(image, quantile, axis=axis).astype(image.dtype)
    return value

get_image_size_info(sizes_xyzct, pixel_nbytes, pixel_type, channels)

Source code in src\image\util.py
def get_image_size_info(sizes_xyzct: list, pixel_nbytes: int, pixel_type: np.dtype, channels: list) -> str:
    image_size_info = 'XYZCT:'
    size = 0
    for i, size_xyzct in enumerate(sizes_xyzct):
        w, h, zs, cs, ts = size_xyzct
        size += np.int64(pixel_nbytes) * w * h * zs * cs * ts
        if i > 0:
            image_size_info += ','
        image_size_info += f' {w} {h} {zs} {cs} {ts}'
    image_size_info += f' Pixel type: {pixel_type} Uncompressed: {print_hbytes(size)}'
    if sizes_xyzct[0][3] == 3:
        channel_info = 'rgb'
    else:
        channel_info = ','.join([channel.get('Name', '') for channel in channels])
    if channel_info != '':
        image_size_info += f' Channels: {channel_info}'
    return image_size_info

get_image_window(image, low=0.01, high=0.99)

Source code in src\image\util.py
def get_image_window(image, low=0.01, high=0.99):
    window = (
        get_image_quantile(image, low),
        get_image_quantile(image, high)
    )
    return window

get_max_downsamples(shape, npyramid_add, pyramid_downsample)

Source code in src\image\util.py
def get_max_downsamples(shape, npyramid_add, pyramid_downsample):
    shape = list(shape)
    for i in range(npyramid_add):
        shape[-1] //= pyramid_downsample
        shape[-2] //= pyramid_downsample
        if shape[-1] < 1 or shape[-2] < 1:
            return i
    return npyramid_add
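
For example, with illustrative values:

print(get_max_downsamples((3, 64, 64), npyramid_add=8, pyramid_downsample=2))
# 6 - a seventh downsample would shrink the last axes below one pixel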

get_mean_nn_distance(points1, points2)

Source code in src\util.py
def get_mean_nn_distance(points1, points2):
    return np.mean([get_nn_distance(points1), get_nn_distance(points2)])

get_moments(data, offset=(0, 0))

Source code in src\util.py
def get_moments(data, offset=(0, 0)):
    moments = cv.moments((np.array(data) + offset).astype(np.float32))    # doesn't work for float64!
    return moments

get_moments_center(moments, offset=(0, 0))

Source code in src\util.py
def get_moments_center(moments, offset=(0, 0)):
    return np.array([moments['m10'], moments['m01']]) / moments['m00'] + np.array(offset)

get_nn_distance(points0)

Source code in src\util.py
def get_nn_distance(points0):
    points = list(set(map(tuple, points0)))     # get unique points
    if len(points) >= 2:
        tree = KDTree(points, leaf_size=2)
        dist, ind = tree.query(points, k=2)
        nn_distance = np.median(dist[:, 1])
    else:
        nn_distance = 1
    return nn_distance
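
A small example with illustrative points:

points = [(0, 0), (0, 1), (0, 3), (10, 10)]
print(get_nn_distance(points))    # 1.5 - the median nearest-neighbour distance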

get_numpy_slicing(dimension_order, **slicing)

Source code in src\image\util.py
def get_numpy_slicing(dimension_order, **slicing):
    slices = []
    for axis in dimension_order:
        index = slicing.get(axis)
        index0 = slicing.get(axis + '0')
        index1 = slicing.get(axis + '1')
        if index0 is not None and index1 is not None:
            slice1 = slice(int(index0), int(index1))
        elif index is not None:
            slice1 = int(index)
        else:
            slice1 = slice(None)
        slices.append(slice1)
    return tuple(slices)
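
An illustrative call, assuming numpy as np:

slices = get_numpy_slicing('tzyx', t=0, y0=10, y1=20)
data = np.zeros((2, 5, 64, 64))
print(data[slices].shape)    # (5, 10, 64) - t fixed to 0, y cropped to [10, 20), z and x untouched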

get_orthogonal_pairs(origins, image_size_um)

Get pairs of orthogonal neighbors from a list of tiles. Tiles don't have to be placed on a regular grid.

Source code in src\util.py
def get_orthogonal_pairs(origins, image_size_um):
    """
    Get pairs of orthogonal neighbors from a list of tiles.
    Tiles don't have to be placed on a regular grid.
    """
    pairs = []
    angles = []
    z_positions = [pos[0] for pos in origins if len(pos) == 3]
    ordered_z = sorted(set(z_positions))
    is_mixed_3dstack = len(ordered_z) < len(z_positions)
    for i, j in np.transpose(np.triu_indices(len(origins), 1)):
        origini = np.array(origins[i])
        originj = np.array(origins[j])
        if is_mixed_3dstack:
            # ignore z value for distance
            distance = math.dist(origini[-2:], originj[-2:])
            min_distance = max(image_size_um[-2:])
            z_i, z_j = origini[0], originj[0]
            is_same_z = (z_i == z_j)
            is_close_z = abs(ordered_z.index(z_i) - ordered_z.index(z_j)) <= 1
            if not is_same_z:
                # for tiles in different z stack, require greater overlap
                min_distance *= 0.8
            ok = (distance < min_distance and is_close_z)
        else:
            distance = math.dist(origini, originj)
            min_distance = max(image_size_um)
            ok = (distance < min_distance)
        if ok:
            pairs.append((i, j))
            vector = origini - originj
            angle = math.degrees(math.atan2(vector[1], vector[0]))
            if distance < min(image_size_um):
                angle += 90
            while angle < -90:
                angle += 180
            while angle > 90:
                angle -= 180
            angles.append(angle)
    return pairs, angles
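
An illustrative call with three tiles and a 100 micrometer tile size (values chosen for demonstration):

origins = [(0, 0), (0, 90), (90, 0)]    # tile origins in micrometers
pairs, angles = get_orthogonal_pairs(origins, image_size_um=(100, 100))
print(pairs)    # two overlapping pairs, (0, 1) and (0, 2); the diagonal pair is too far apart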

get_properties_from_transform(transform, invert=False)

Source code in src\image\util.py
def get_properties_from_transform(transform, invert=False):
    if len(transform.shape) == 3:
        transform = transform[0]
    if invert:
        transform = param_utils.invert_coordinate_order(transform)
    transform = np.array(transform)
    translation = param_utils.translation_from_affine(transform)
    if len(translation) == 2:
        translation = list(translation) + [0]
    rotation = get_rotation_from_transform(transform)
    scale = get_scale_from_transform(transform)
    return translation, rotation, scale

get_rotation_from_transform(transform)

Source code in src\util.py
def get_rotation_from_transform(transform):
    rotation = np.rad2deg(np.arctan2(transform[0][1], transform[0][0]))
    return rotation

get_scale_from_transform(transform)

Source code in src\util.py
def get_scale_from_transform(transform):
    scale = np.mean(np.linalg.norm(transform, axis=0)[:-1])
    return scale

get_sim_physical_size(sim, invert=False)

Source code in src\image\util.py
def get_sim_physical_size(sim, invert=False):
    size = si_utils.get_shape_from_sim(sim, asarray=True) * si_utils.get_spacing_from_sim(sim, asarray=True)
    if invert:
        size = np.flip(size)
    return size

get_sim_position_final(sim)

Source code in src\image\util.py
def get_sim_position_final(sim):
    transform_keys = si_utils.get_tranform_keys_from_sim(sim)
    transform = combine_transforms([np.array(si_utils.get_affine_from_sim(sim, transform_key))
                                    for transform_key in transform_keys])
    position = apply_transform([si_utils.get_origin_from_sim(sim, asarray=True)], transform)[0]
    return position

get_sim_shape_2d(sim, transform_key=None)

Source code in src\image\util.py
def get_sim_shape_2d(sim, transform_key=None):
    if 't' in sim.coords.xindexes:
        # work-around for points error in get_overlap_bboxes()
        sim1 = si_utils.sim_sel_coords(sim, {'t': 0})
    else:
        sim1 = sim
    stack_props = si_utils.get_stack_properties_from_sim(sim1, transform_key=transform_key)
    vertices = mv_graph.get_vertices_from_stack_props(stack_props)
    if vertices.shape[1] == 3:
        # remove z coordinate
        vertices = vertices[:, 1:]
    if len(vertices) >= 8:
        # remove redundant x/y vertices
        vertices = vertices[:4]
    if len(vertices) >= 4:
        # last 2 vertices appear to be swapped
        vertices[2:] = np.array(list(reversed(vertices[2:])))
    return vertices

get_translation_from_transform(transform)

Source code in src\util.py
def get_translation_from_transform(transform):
    ndim = len(transform) - 1
    #translation = transform[:ndim, ndim]
    translation = apply_transform([[0] * ndim], transform)[0]
    return translation

get_unique_file_labels(filenames)

Source code in src\util.py
def get_unique_file_labels(filenames: list) -> list:
    file_labels = []
    file_parts = []
    label_indices = set()
    last_parts = None
    for filename in filenames:
        parts = split_numeric(filename)
        if len(parts) == 0:
            parts = filename
        file_parts.append(parts)
        if last_parts is not None:
            for parti, (part1, part2) in enumerate(zip(last_parts, parts)):
                if part1 != part2:
                    label_indices.add(parti)
        last_parts = parts
    label_indices = sorted(list(label_indices))

    for file_part in file_parts:
        file_label = '_'.join([file_part[i] for i in label_indices])
        file_labels.append(file_label)

    if len(set(file_labels)) < len(file_labels):
        # fallback for duplicate labels
        file_labels = [get_filetitle(filename) for filename in filenames]

    return file_labels

get_value_units_micrometer(value_units0)

Source code in src\util.py
def get_value_units_micrometer(value_units0: list|dict) -> list|dict|None:
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    if value_units0 is None:
        return None

    if isinstance(value_units0, dict):
        values_um = {}
        for dim, value_unit in value_units0.items():
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um[dim] = value_um
    else:
        values_um = []
        for value_unit in value_units0:
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um.append(value_um)
    return values_um
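
For example:

print(get_value_units_micrometer([(5, 'mm'), (250, 'nm'), 1.5]))
# [5000.0, 0.25, 1.5] - bare numbers are assumed to already be in micrometers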

grayscale_image(image)

Source code in src\image\util.py
def grayscale_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 4:
        return cv.cvtColor(image, cv.COLOR_RGBA2GRAY)
    elif nchannels > 1:
        return cv.cvtColor(image, cv.COLOR_RGB2GRAY)
    else:
        return image

group_sims_by_z(sims)

Source code in src\image\util.py
def group_sims_by_z(sims):
    grouped_sims = []
    z_positions = [si_utils.get_origin_from_sim(sim).get('z') for sim in sims]
    is_mixed_3dstack = len(set(z_positions)) < len(z_positions)
    if is_mixed_3dstack:
        sims_by_z = {}
        for simi, z_pos in enumerate(z_positions):
            if z_pos is not None and z_pos not in sims_by_z:
                sims_by_z[z_pos] = []
            sims_by_z[z_pos].append(simi)
        grouped_sims = list(sims_by_z.values())
    if len(grouped_sims) == 0:
        grouped_sims = [list(range(len(sims)))]
    return grouped_sims

image_reshape(image, target_size)

Source code in src\image\util.py
def image_reshape(image: np.ndarray, target_size: tuple) -> np.ndarray:
    tw, th = target_size
    sh, sw = image.shape[0:2]
    if sw < tw or sh < th:
        dw = max(tw - sw, 0)
        dh = max(th - sh, 0)
        padding = [(dh // 2, dh - dh // 2), (dw // 2, dw - dw // 2)]
        if len(image.shape) == 3:
            padding += [(0, 0)]
        image = np.pad(image, padding, mode='constant', constant_values=(0, 0))
    if tw < sw or th < sh:
        image = image[0:th, 0:tw]
    return image

image_resize(image, target_size0, dimension_order='yxc')

Source code in src\image\util.py
def image_resize(image: np.ndarray, target_size0: tuple, dimension_order: str = 'yxc') -> np.ndarray:
    shape = image.shape
    x_index = dimension_order.index('x')
    y_index = dimension_order.index('y')
    c_is_at_end = ('c' in dimension_order and dimension_order.endswith('c'))
    size = shape[x_index], shape[y_index]
    if np.mean(np.divide(size, target_size0)) < 1:
        interpolation = cv.INTER_CUBIC
    else:
        interpolation = cv.INTER_AREA
    dtype0 = image.dtype
    image = ensure_unsigned_image(image)
    target_size = tuple(np.maximum(np.round(target_size0).astype(int), 1))
    if dimension_order in ['yxc', 'yx']:
        new_image = cv.resize(np.asarray(image), target_size, interpolation=interpolation)
    elif dimension_order == 'cyx':
        new_image = np.moveaxis(image, 0, -1)
        new_image = cv.resize(np.asarray(new_image), target_size, interpolation=interpolation)
        new_image = np.moveaxis(new_image, -1, 0)
    else:
        ts = image.shape[dimension_order.index('t')] if 't' in dimension_order else 1
        zs = image.shape[dimension_order.index('z')] if 'z' in dimension_order else 1
        target_shape = list(image.shape)
        target_shape[x_index] = target_size[0]
        target_shape[y_index] = target_size[1]
        new_image = np.zeros(target_shape, dtype=image.dtype)
        for t in range(ts):
            for z in range(zs):
                slices = get_numpy_slicing(dimension_order, z=z, t=t)
                image1 = image[slices]
                if not c_is_at_end:
                    image1 = np.moveaxis(image1, 0, -1)
                new_image1 = np.atleast_3d(cv.resize(np.asarray(image1), target_size, interpolation=interpolation))
                if not c_is_at_end:
                    new_image1 = np.moveaxis(new_image1, -1, 0)
                new_image[slices] = new_image1
    new_image = convert_image_sign_type(new_image, dtype0)
    return new_image

import_csv(filename)

Source code in src\util.py
def import_csv(filename):
    with open(filename, encoding='utf8') as file:
        data = list(csv.reader(file))    # materialise the rows while the file is still open
    return data

import_json(filename)

Source code in src\util.py
def import_json(filename):
    with open(filename, encoding='utf8') as file:
        data = json.load(file)
    return data

import_metadata(content, fields=None, input_path=None)

Source code in src\util.py
def import_metadata(content, fields=None, input_path=None):
    # return dict[id] = {values}
    if isinstance(content, str):
        ext = os.path.splitext(content)[1].lower()
        if input_path:
            if isinstance(input_path, list):
                input_path = input_path[0]
            content = os.path.normpath(os.path.join(os.path.dirname(input_path), content))
        if ext == '.csv':
            content = import_csv(content)
        elif ext in ['.json', '.ome.json']:
            content = import_json(content)
    if fields is not None:
        content = [[data[field] for field in fields] for data in content]
    return content

int2float_image(image)

Source code in src\image\util.py
def int2float_image(image):
    source_dtype = image.dtype
    if not source_dtype.kind == 'f':
        maxval = 2 ** (8 * source_dtype.itemsize) - 1
        return image / np.float32(maxval)
    else:
        return image

norm_image_quantiles(image0, quantile=0.99)

Source code in src\image\util.py
def norm_image_quantiles(image0, quantile=0.99):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    min_value = np.quantile(image, 1 - quantile)
    max_value = np.quantile(image, quantile)
    normimage = (image - np.mean(image)) / (max_value - min_value)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

norm_image_variance(image0)

Source code in src\image\util.py
def norm_image_variance(image0):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    normimage = (image - np.mean(image)) / np.std(image)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

normalise(sims, transform_key, use_global=True)

Source code in src\image\util.py
def normalise(sims, transform_key, use_global=True):
    new_sims = []
    dtype = sims[0].dtype
    # global mean and stddev
    if use_global:
        norm_mins = []
        norm_ranges = []
        for sim in sims:
            norm_min = np.mean(sim, dtype=np.float32)
            norm_range = np.std(sim, dtype=np.float32)
            #norm_min, norm_max = get_image_window(sim, low=0.01, high=0.99)
            #norm_range = norm_max - norm_min
            norm_mins.append(norm_min)
            norm_ranges.append(norm_range)
        norm_min = np.mean(norm_mins)
        norm_range = np.mean(norm_ranges)
    else:
        norm_min = 0
        norm_range = 1
    # normalise all images
    for sim in sims:
        if not use_global:
            norm_min = np.mean(sim, dtype=np.float32)
            norm_range = np.std(sim, dtype=np.float32)
        image = (sim - norm_min) / norm_range
        image = float2int_image(image.clip(0, 1), dtype)    # np.clip(image) is not dask-compatible, use image.clip() instead
        new_sim = si_utils.get_sim_from_array(
            image,
            dims=sim.dims,
            scale=si_utils.get_spacing_from_sim(sim),
            translation=si_utils.get_origin_from_sim(sim),
            transform_key=transform_key,
            affine=si_utils.get_affine_from_sim(sim, transform_key),
            c_coords=sim.c.data,
            t_coords=sim.t.data
        )
        new_sims.append(new_sim)
    return new_sims

normalise_rotated_positions(positions0, rotations0, size, center)

Source code in src\util.py
def normalise_rotated_positions(positions0, rotations0, size, center):
    # in [xy(z)]
    positions = []
    rotations = []
    _, angles = get_orthogonal_pairs(positions0, size)
    for position0, rotation in zip(positions0, rotations0):
        if rotation is None and len(angles) > 0:
            rotation = -np.mean(angles)
        angle = -rotation if rotation is not None else None
        transform = create_transform(center=center, angle=angle, matrix_size=4)
        position = apply_transform([position0], transform)[0]
        positions.append(position)
        rotations.append(rotation)
    return positions, rotations

normalise_rotation(rotation)

Normalise rotation to be in the range [-180, 180].

Source code in src\util.py
def normalise_rotation(rotation):
    """
    Normalise rotation to be in the range [-180, 180].
    """
    while rotation < -180:
        rotation += 360
    while rotation > 180:
        rotation -= 360
    return rotation
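
For example:

print(normalise_rotation(450))     # 90
print(normalise_rotation(-270))    # 90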

normalise_values(image, min_value, max_value)

Source code in src\image\util.py
def normalise_values(image: np.ndarray, min_value: float, max_value: float) -> np.ndarray:
    image = (image.astype(np.float32) - min_value) / (max_value - min_value)
    return image.clip(0, 1)

pilmode_to_pixelinfo(mode)

Source code in src\image\util.py
def pilmode_to_pixelinfo(mode: str) -> tuple:
    pixelinfo = (np.uint8, 8, 1)
    mode_types = {
        'I': (np.uint32, 32, 1),
        'F': (np.float32, 32, 1),
        'RGB': (np.uint8, 24, 3),
        'RGBA': (np.uint8, 32, 4),
        'CMYK': (np.uint8, 32, 4),
        'YCbCr': (np.uint8, 24, 3),
        'LAB': (np.uint8, 24, 3),
        'HSV': (np.uint8, 24, 3),
    }
    if '16' in mode:
        pixelinfo = (np.uint16, 16, 1)
    elif '32' in mode:
        pixelinfo = (np.uint32, 32, 1)
    elif mode in mode_types:
        pixelinfo = mode_types[mode]
    pixelinfo = (np.dtype(pixelinfo[0]), pixelinfo[1])
    return pixelinfo

points_to_3d(points)

Source code in src\util.py
def points_to_3d(points):
    return [list(point) + [0] for point in points]

precise_resize(image, factors)

Source code in src\image\util.py
def precise_resize(image: np.ndarray, factors) -> np.ndarray:
    if image.ndim > len(factors):
        factors = list(factors) + [1]
    new_image = downscale_local_mean(np.asarray(image), tuple(factors)).astype(image.dtype)
    return new_image

print_dict(dct, indent=0)

Source code in src\util.py
def print_dict(dct: dict, indent: int = 0) -> str:
    s = ''
    if isinstance(dct, dict):
        for key, value in dct.items():
            s += '\n'
            if not isinstance(value, list):
                s += '\t' * indent + str(key) + ': '
            if isinstance(value, dict):
                s += print_dict(value, indent=indent + 1)
            elif isinstance(value, list):
                for v in value:
                    s += print_dict(v)
            else:
                s += str(value)
    else:
        s += str(dct)
    return s

print_hbytes(nbytes)

Source code in src\util.py
def print_hbytes(nbytes: int) -> str:
    exps = ['', 'K', 'M', 'G', 'T', 'P', 'E']
    div = 1024
    exp = 0

    while nbytes >= div:
        nbytes /= div
        exp += 1
    if exp < len(exps):
        e = exps[exp]
    else:
        e = f'e{exp * 3}'
    return f'{nbytes:.1f}{e}B'
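
For example:

print(print_hbytes(3 * 1024 ** 3))    # 3.0GB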

redimension_data(data, old_order, new_order, **indices)

Source code in src\image\util.py
def redimension_data(data, old_order, new_order, **indices):
    # indices for dimensions that are removed can be provided as keyword arguments, e.g. t=0, z=0
    if new_order == old_order:
        return data

    new_data = data
    order = old_order
    # remove
    for o in old_order:
        if o not in new_order:
            index = order.index(o)
            dim_value = indices.get(o, 0)
            new_data = np.take(new_data, indices=dim_value, axis=index)
            order = order[:index] + order[index + 1:]
    # add
    for o in new_order:
        if o not in order:
            new_data = np.expand_dims(new_data, 0)
            order = o + order
    # move
    old_indices = [order.index(o) for o in new_order]
    new_indices = list(range(len(new_order)))
    new_data = np.moveaxis(new_data, old_indices, new_indices)
    return new_data
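
Two illustrative calls, assuming numpy as np:

data = np.zeros((3, 2, 64, 64))    # dimension order czyx
print(redimension_data(data, 'czyx', 'tczyx').shape)    # (1, 3, 2, 64, 64) - t axis added
print(redimension_data(data, 'czyx', 'yx', c=1, z=0).shape)    # (64, 64) - c and z selected, then dropped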

reorder(items, old_order, new_order, default_value=0)

Source code in src\util.py
def reorder(items: list, old_order: str, new_order: str, default_value: int = 0) -> list:
    new_items = []
    for label in new_order:
        if label in old_order:
            item = items[old_order.index(label)]
        else:
            item = default_value
        new_items.append(item)
    return new_items

resize_image(image, new_size)

Source code in src\image\util.py
def resize_image(image, new_size):
    if not isinstance(new_size, (tuple, list, np.ndarray)):
        # use single value for width; apply aspect ratio
        size = np.flip(image.shape[:2])
        new_size = new_size, new_size * size[1] // size[0]
    return cv.resize(image, new_size)

retuple(chunks, shape)

Expand chunks to match shape.

E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028) return (3, 4, 5, 64, 64)

If chunks is an integer, it is applied to all dimensions, to match the behaviour of zarr-python.

Source code in src\util.py
def retuple(chunks, shape):
    # from ome-zarr-py
    """
    Expand chunks to match shape.

    E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028)
    return (3, 4, 5, 64, 64)

    If chunks is an integer, it is applied to all dimensions, to match
    the behaviour of zarr-python.
    """

    if isinstance(chunks, int):
        return tuple([chunks] * len(shape))

    dims_to_add = len(shape) - len(chunks)
    return *shape[:dims_to_add], *chunks
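
For example:

print(retuple((64, 64), (3, 4, 5, 1028, 1028)))    # (3, 4, 5, 64, 64)
print(retuple(32, (5, 256, 256)))                  # (32, 32, 32)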

round_significants(a, significant_digits)

Source code in src\util.py
def round_significants(a: float, significant_digits: int) -> float:
    if a != 0:
        round_decimals = significant_digits - int(np.floor(np.log10(abs(a)))) - 1
        return round(a, round_decimals)
    return a
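
For example:

print(round_significants(0.0012345, 3))    # 0.00123
print(round_significants(12345, 2))        # 12000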

show_image(image, title='', cmap=None)

Source code in src\image\util.py
def show_image(image, title='', cmap=None):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if cmap is None:
        cmap = 'gray' if nchannels == 1 else None
    plt.imshow(image, cmap=cmap)
    if title != '':
        plt.title(title)
    plt.show()

split_num_text(text)

Source code in src\util.py
def split_num_text(text: str) -> list:
    num_texts = []
    block = ''
    is_num0 = None
    if text is None:
        return None

    for c in text:
        is_num = (c.isnumeric() or c == '.')
        if is_num0 is not None and is_num != is_num0:
            num_texts.append(block)
            block = ''
        block += c
        is_num0 = is_num
    if block != '':
        num_texts.append(block)

    num_texts2 = []
    for block in num_texts:
        block = block.strip()
        try:
            block = float(block)
        except ValueError:
            pass
        if block not in ['', ' ', ',', '|']:    # stripped separator blocks become ''
            num_texts2.append(block)
    return num_texts2

split_numeric(text)

Source code in src\util.py
def split_numeric(text: str) -> list:
    num_parts = []
    parts = re.split(r'[_/\\.]', text)
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            num_parts.append(part)
    return num_parts

split_numeric_dict(text)

Source code in src\util.py
def split_numeric_dict(text: str) -> dict:
    num_parts = {}
    parts = re.split(r'[_/\\.]', text)
    parti = 0
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            index = num_span.start()
            label = part[:index]
            if label == '':
                label = parti
            num_parts[label] = num_span.group()
            parti += 1
    return num_parts

split_path(path)

Source code in src\util.py
def split_path(path: str) -> list:
    return os.path.normpath(path).split(os.path.sep)

split_value_unit_list(text)

Source code in src\util.py
def split_value_unit_list(text: str) -> list:
    value_units = []
    if text is None:
        return None

    items = split_num_text(text)
    if isinstance(items[-1], str):
        def_unit = items[-1]
    else:
        def_unit = ''

    i = 0
    while i < len(items):
        value = items[i]
        if i + 1 < len(items):
            unit = items[i + 1]
        else:
            unit = ''
        if not isinstance(value, str):
            if isinstance(unit, str):
                i += 1
            else:
                unit = def_unit
            value_units.append((value, unit))
        i += 1
    return value_units
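
An illustrative call (this relies on the separator handling in split_num_text above): a single trailing unit applies to all preceding values.

print(split_value_unit_list('0.1 0.1 1.5 um'))    # [(0.1, 'um'), (0.1, 'um'), (1.5, 'um')]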

uint8_image(image)

Source code in src\image\util.py
def uint8_image(image):
    source_dtype = image.dtype
    if source_dtype.kind == 'f':
        image = image * 255
    elif source_dtype.itemsize != 1:
        factor = 2 ** (8 * (source_dtype.itemsize - 1))
        image = image // factor
    return image.astype(np.uint8)

validate_transform(transform, max_rotation=None)

Source code in src\util.py
def validate_transform(transform, max_rotation=None):
    if transform is None:
        return False
    transform = np.array(transform)
    if np.any(np.isnan(transform)):
        return False
    if np.any(np.isinf(transform)):
        return False
    if np.linalg.det(transform) == 0:
        return False
    if max_rotation is not None and abs(normalise_rotation(get_rotation_from_transform(transform))) > max_rotation:
        return False
    return True

xyz_to_dict(xyz, axes='xyz')

Source code in src\util.py
def xyz_to_dict(xyz, axes='xyz'):
    dct = {dim: value for dim, value in zip(axes, xyz)}
    return dct

metrics

calc_frc(image1, image2)

Source code in src\metrics.py
def calc_frc(image1, image2):
    pixel_size1 = si_utils.get_spacing_from_sim(image1)
    pixel_size2 = si_utils.get_spacing_from_sim(image2)
    pixel_size = np.mean([pixel_size1['x'], pixel_size1['y'], pixel_size2['x'], pixel_size2['y']])
    max_size = np.flip(np.max([image1.shape, image2.shape], 0))
    image1 = frc.util.square_image(image_reshape(image1, max_size), add_padding=True)
    image2 = frc.util.square_image(image_reshape(image2, max_size), add_padding=True)

    frc_curve = frc.two_frc(image1, image2)
    xs_pix = np.arange(len(frc_curve)) / max(max_size)
    # scale has units [pixels <length unit>^-1] corresponding to original image
    xs_nm_freq = xs_pix / pixel_size
    frc_res, res_y, thres = frc.frc_res(xs_nm_freq, frc_curve, max_size)
    #plt.plot(xs_nm_freq, thres(xs_nm_freq))
    #plt.plot(xs_nm_freq, frc_curve)
    #plt.show()
    return frc_res

calc_match_metrics(points1, points2, transform, threshold, lowe_ratio=None)

Source code in src\metrics.py
def calc_match_metrics(points1, points2, transform, threshold, lowe_ratio=None):
    metrics = {}
    transformed_points1 = apply_transform(points1, transform)
    npoints1, npoints2 = len(points1), len(points2)
    npoints = min(npoints1, npoints2)
    if npoints1 == 0 or npoints2 == 0:
        return metrics

    swapped = (npoints1 > npoints2)
    if swapped:
        points1, points2 = points2, points1

    distance_matrix = euclidean_distances(transformed_points1, points2)
    matching_distances = np.diag(distance_matrix)
    if npoints1 == npoints2 and np.mean(matching_distances < threshold) > 0.5:
        # already matching points lists
        nmatches = np.sum(matching_distances < threshold)
    else:
        matches = []
        distances0 = []
        for rowi, row in enumerate(distance_matrix):
            sorted_indices = np.argsort(row)
            index0 = sorted_indices[0]
            distance0 = row[index0]
            matches.append((rowi, sorted_indices))
            distances0.append(distance0)
        sorted_matches = np.argsort(distances0)

        done = []
        nmatches = 0
        matching_distances = []
        for sorted_match in sorted_matches:
            i, match = matches[sorted_match]
            for ji, j in enumerate(match):
                if j not in done:
                    # found best, available match
                    distance0 = distance_matrix[i, j]
                    distance1 = distance_matrix[i, match[ji + 1]] if ji + 1 < len(match) else np.inf
                    matching_distances.append(distance0)    # use all distances to also weigh in the non-matches
                    if distance0 < threshold and (lowe_ratio is None or distance0 < lowe_ratio * distance1):
                        done.append(j)
                        nmatches += 1
                    break

    metrics['nmatches'] = nmatches
    metrics['match_rate'] = nmatches / npoints if npoints > 0 else 0
    distance = np.mean(matching_distances) if nmatches > 0 else np.inf
    metrics['distance'] = float(distance)
    metrics['norm_distance'] = float(distance / threshold)
    return metrics

calc_ncc(image1, image2)

Source code in src\metrics.py
def calc_ncc(image1, image2):
    max_size = np.flip(np.max([image1.shape, image2.shape], 0))
    image1 = image_reshape(image1, max_size)
    image2 = image_reshape(image2, max_size)

    normimage1 = np.array(image1 - np.mean(image1))
    normimage2 = np.array(image2 - np.mean(image2))
    ncc = np.sum(normimage1 * normimage2) / (np.linalg.norm(normimage1) * np.linalg.norm(normimage2))
    return float(ncc)
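
A sanity check with synthetic data, assuming numpy as np:

rng = np.random.default_rng(0)
image = rng.random((64, 64))
print(calc_ncc(image, image))        # approximately 1.0 - identical images
print(calc_ncc(image, 1 - image))    # approximately -1.0 - inverted contrast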

calc_ncc2(image1, image2)

Source code in src\metrics.py
def calc_ncc2(image1, image2):
    max_size = np.flip(np.max([image1.shape, image2.shape], 0))
    image1 = image_reshape(image1, max_size)
    image2 = image_reshape(image2, max_size)

    normimage1 = (image1 - np.mean(image1)) / np.std(image1)
    normimage2 = (image2 - np.mean(image2)) / np.std(image2)
    array1 = np.array(normimage1).reshape(-1)
    array2 = np.array(normimage2).reshape(-1)
    ncc = (np.correlate(array1, array2) / max(len(array1), len(array2)))[0]
    return float(ncc)

calc_ssim(image1, image2)

Source code in src\metrics.py
def calc_ssim(image1, image2):
    dtype = image1.dtype
    maxval = 2 ** (8 * dtype.itemsize) - 1
    max_size = np.flip(np.max([image1.shape, image2.shape], 0))
    image1 = image_reshape(image1, max_size)
    image2 = image_reshape(image2, max_size)
    try:
        ssim = structural_similarity(np.array(image1), np.array(image2), data_range=maxval)
    except ValueError:
        ssim = np.nan
    return float(ssim)

registration_methods

RegistrationMethod

RegistrationMethod

Bases: ABC

Source code in src\registration_methods\RegistrationMethod.py
class RegistrationMethod(ABC):
    def __init__(self, source, params, debug=False):
        self.source_type = source.dtype
        if hasattr(source, 'dims'):
            self.full_size = si_utils.get_shape_from_sim(source, asarray=True)
            self.ndims = 2 + int('z' in source.dims)
        else:
            self.full_size = [size for size in source.shape if size > 4]    # try to filter channel dimension
            self.ndims = len(self.full_size)
        self.params = params
        self.debug = debug
        self.count = 0  # for debugging

    def convert_data_to_float(self, data):
        maxval = 2 ** (8 * self.source_type.itemsize) - 1
        return data / np.float32(maxval)

    @abstractmethod
    def registration(self, fixed_data: SpatialImage, moving_data: SpatialImage, **kwargs) -> dict:
        # this returns the transform in pixel space, needs to be thread-safe!
        # reg_func_transform = linalg.inv(params_transform) / spacing
        # params_transform = linalg.inv(reg_func_transform * spacing)
        return {
            "affine_matrix": [],  # homogeneous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
            "quality": 1  # float between 0 and 1 (if not available, set to 1.0)
        }
count = 0 instance-attribute
debug = debug instance-attribute
full_size = si_utils.get_shape_from_sim(source, asarray=True) instance-attribute
ndims = 2 + int('z' in source.dims) instance-attribute
params = params instance-attribute
source_type = source.dtype instance-attribute
__init__(source, params, debug=False)
Source code in src\registration_methods\RegistrationMethod.py
def __init__(self, source, params, debug=False):
    self.source_type = source.dtype
    if hasattr(source, 'dims'):
        self.full_size = si_utils.get_shape_from_sim(source, asarray=True)
        self.ndims = 2 + int('z' in source.dims)
    else:
        self.full_size = [size for size in source.shape if size > 4]    # try to filter channel dimension
        self.ndims = len(self.full_size)
    self.params = params
    self.debug = debug
    self.count = 0  # for debugging
convert_data_to_float(data)
Source code in src\registration_methods\RegistrationMethod.py
def convert_data_to_float(self, data):
    maxval = 2 ** (8 * self.source_type.itemsize) - 1
    return data / np.float32(maxval)
registration(fixed_data, moving_data, **kwargs) abstractmethod
Source code in src\registration_methods\RegistrationMethod.py
@abstractmethod
def registration(self, fixed_data: SpatialImage, moving_data: SpatialImage, **kwargs) -> dict:
    # this returns the transform in pixel space, needs to be thread-safe!
    # reg_func_transform = linalg.inv(params_transform) / spacing
    # params_transform = linalg.inv(reg_func_transform * spacing)
    return {
        "affine_matrix": [],  # homogeneous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
        "quality": 1  # float between 0 and 1 (if not available, set to 1.0)
    }

RegistrationMethodANTs3Din2D

RegistrationMethodANTs3Din2D

Bases: RegistrationMethod

Source code in src\registration_methods\RegistrationMethodANTs3Din2D.py
class RegistrationMethodANTs3Din2D(RegistrationMethod):
    def __init__(self, source, params, debug):
        super().__init__(source, params, debug)

    def registration(
            self,
            fixed_data,
            moving_data,
            *,
            fixed_origin,
            moving_origin,
            fixed_spacing,
            moving_spacing,
            initial_affine,
            transform_types=None,
            **ants_registration_kwargs,
        ):
        """
        Register two 3d sims by projecting them to 2d and using 2d registration.
        The z component of the resulting affine matrix is set to identity.
        """

        fixed_data = fixed_data.max('z')
        moving_data = moving_data.max('z')

        dims2d = ['y', 'x']
        fixed_origin = {dim: fixed_origin[dim] for dim in dims2d}
        moving_origin = {dim: moving_origin[dim] for dim in dims2d}
        fixed_spacing = {dim: fixed_spacing[dim] for dim in dims2d}
        moving_spacing = {dim: moving_spacing[dim] for dim in dims2d}

        initial_affine = initial_affine[1:, 1:]    # drop the z row and column

        # call 2d registration on the projected sims
        # reg_res_2d = registration.phase_correlation_registration(
        #     sim1, sim2, **kwargs)
        reg_res_2d = registration.registration_ANTsPy(
            fixed_data,
            moving_data,
            fixed_origin=fixed_origin,
            moving_origin=moving_origin,
            fixed_spacing=fixed_spacing,
            moving_spacing=moving_spacing,
            initial_affine=initial_affine,
            transform_types=transform_types,
            **ants_registration_kwargs,
            )

        # embed resulting 2d affine matrix into 3d affine matrix
        reg_res_3d = deepcopy(reg_res_2d)
        reg_res_3d['affine_matrix'] = param_utils.identity_transform(3)
        reg_res_3d['affine_matrix'][1:, 1:] = reg_res_2d['affine_matrix']

        return reg_res_3d
__init__(source, params, debug)
Source code in src\registration_methods\RegistrationMethodANTs3Din2D.py
def __init__(self, source, params, debug):
    super().__init__(source, params, debug)
registration(fixed_data, moving_data, *, fixed_origin, moving_origin, fixed_spacing, moving_spacing, initial_affine, transform_types=None, **ants_registration_kwargs)

Register two 3d sims by projecting them to 2d and using 2d registration. The z component of the resulting affine matrix is set to identity.

Source code in src\registration_methods\RegistrationMethodANTs3Din2D.py
def registration(
        self,
        fixed_data,
        moving_data,
        *,
        fixed_origin,
        moving_origin,
        fixed_spacing,
        moving_spacing,
        initial_affine,
        transform_types=None,
        **ants_registration_kwargs,
    ):
    """
    Register two 3d sims by projecting them to 2d and using 2d registration.
    The z component of the resulting affine matrix is set to identity.
    """

    fixed_data = fixed_data.max('z')
    moving_data = moving_data.max('z')

    dims2d = ['y', 'x']
    fixed_origin = {dim: fixed_origin[dim] for dim in dims2d}
    moving_origin = {dim: moving_origin[dim] for dim in dims2d}
    fixed_spacing = {dim: fixed_spacing[dim] for dim in dims2d}
    moving_spacing = {dim: moving_spacing[dim] for dim in dims2d}

    initial_affine = initial_affine[1:, 1:]    # drop the z row and column

    # call 2d registration on the projected sims
    # reg_res_2d = registration.phase_correlation_registration(
    #     sim1, sim2, **kwargs)
    reg_res_2d = registration.registration_ANTsPy(
        fixed_data,
        moving_data,
        fixed_origin=fixed_origin,
        moving_origin=moving_origin,
        fixed_spacing=fixed_spacing,
        moving_spacing=moving_spacing,
        initial_affine=initial_affine,
        transform_types=transform_types,
        **ants_registration_kwargs,
        )

    # embed resulting 2d affine matrix into 3d affine matrix
    reg_res_3d = deepcopy(reg_res_2d)
    reg_res_3d['affine_matrix'] = param_utils.identity_transform(3)
    reg_res_3d['affine_matrix'][1:, 1:] = reg_res_2d['affine_matrix']

    return reg_res_3d

RegistrationMethodCPD

RegistrationMethodCPD

Bases: RegistrationMethod

Source code in src\registration_methods\RegistrationMethodCPD.py
class RegistrationMethodCPD(RegistrationMethod):
    def detect_points(self, data0):
        data = data0.astype(self.source_type)
        area_points = detect_area_points(data)
        points = [point for point, area in area_points]
        return points

    def registration(self, fixed_data: SpatialImage, moving_data: SpatialImage, **kwargs) -> dict:
        max_iter = kwargs.get('max_iter', 1000)

        fixed_points = self.detect_points(fixed_data)
        moving_points = self.detect_points(moving_data)
        threshold = get_mean_nn_distance(fixed_points, moving_points)

        transform = None
        quality = 0
        if len(moving_points) > 1 and len(fixed_points) > 1:
            result_cpd = cpd.registration_cpd(points_to_3d(moving_points), points_to_3d(fixed_points),
                                              maxiter=max_iter)
            transformation = result_cpd.transformation
            S = transformation.scale * np.eye(3)
            R = transformation.rot
            T = np.eye(3) + np.hstack([np.zeros((3, 2)), transformation.t.reshape(-1, 1)])
            transform = T @ R @ S

            metrics = calc_match_metrics(fixed_points, moving_points, transform, threshold)
            quality = metrics['match_rate']

        if not validate_transform(transform, get_sim_physical_size(fixed_data, invert=True)):
            logging.error('Unable to find CPD registration')

        return {
            "affine_matrix": transform,  # homogeneous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
            "quality": quality  # float between 0 and 1 (if not available, set to 1.0)
        }
detect_points(data0)
Source code in src\registration_methods\RegistrationMethodCPD.py
def detect_points(self, data0):
    data = data0.astype(self.source_type)
    area_points = detect_area_points(data)
    points = [point for point, area in area_points]
    return points
registration(fixed_data, moving_data, **kwargs)
Source code in src\registration_methods\RegistrationMethodCPD.py
def registration(self, fixed_data: SpatialImage, moving_data: SpatialImage, **kwargs) -> dict:
    max_iter = kwargs.get('max_iter', 1000)

    fixed_points = self.detect_points(fixed_data)
    moving_points = self.detect_points(moving_data)
    threshold = get_mean_nn_distance(fixed_points, moving_points)

    transform = None
    quality = 0
    if len(moving_points) > 1 and len(fixed_points) > 1:
        result_cpd = cpd.registration_cpd(points_to_3d(moving_points), points_to_3d(fixed_points),
                                          maxiter=max_iter)
        transformation = result_cpd.transformation
        S = transformation.scale * np.eye(3)
        R = transformation.rot
        T = np.eye(3) + np.hstack([np.zeros((3, 2)), transformation.t.reshape(-1, 1)])
        transform = T @ R @ S

        metrics = calc_match_metrics(fixed_points, moving_points, transform, threshold)
        quality = metrics['match_rate']

    if not validate_transform(transform, get_sim_physical_size(fixed_data, invert=True)):
        logging.error('Unable to find CPD registration')

    return {
        "affine_matrix": transform,  # homogeneous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
        "quality": quality  # float between 0 and 1 (if not available, set to 1.0)
    }

apply_transform(points, transform)

Source code in src\util.py
371
372
373
374
375
376
377
378
379
def apply_transform(points, transform):
    new_points = []
    for point in points:
        point_len = len(point)
        while len(point) < len(transform):
            point = list(point) + [1]
        new_point = np.dot(point, np.transpose(np.array(transform)))
        new_points.append(new_point[:point_len])
    return new_points
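
An illustrative call: each point is extended to homogeneous coordinates, transformed, and trimmed back to its original length.

transform = [[0, -1, 10], [1, 0, 20], [0, 0, 1]]    # 90 degree rotation plus a translation
print(apply_transform([(1, 0)], transform))    # [array([10, 21])]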

blur_image(image, sigma)

Source code in src\image\util.py
def blur_image(image, sigma):
    nchannels = image.shape[2] if image.ndim == 3 else 1
    if nchannels not in [1, 3]:
        new_image = np.zeros_like(image)
        for channeli in range(nchannels):
            new_image[..., channeli] = blur_image_single(image[..., channeli], sigma)
    else:
        new_image = blur_image_single(image, sigma)
    return new_image

blur_image_single(image, sigma)

Source code in src\image\util.py
def blur_image_single(image, sigma):
    return gaussian_filter(image, sigma)

calc_foreground_map(sims)

Source code in src\image\util.py
def calc_foreground_map(sims):
    if len(sims) <= 2:
        return [True] * len(sims)
    sims = [sim.squeeze().astype(np.float32) for sim in sims]
    median_image = calc_images_median(sims).astype(np.float32)
    difs = [np.mean(np.abs(sim - median_image), (0, 1)) for sim in sims]
    # or use stddev instead of mean?
    threshold = np.mean(difs, 0)
    #threshold, _ = cv.threshold(np.array(difs).astype(np.uint16), 0, 1, cv.THRESH_OTSU)
    #threshold, foregrounds = filter_noise_images(channel_images)
    map = (difs > threshold)
    if np.all(map == False):
        return [True] * len(sims)
    return map

calc_images_median(images)

Source code in src\image\util.py
def calc_images_median(images):
    out_image = np.zeros(shape=images[0].shape, dtype=images[0].dtype)
    median_image = np.median(images, 0, out_image)
    return median_image

calc_images_quantiles(images, quantiles)

Source code in src\image\util.py
def calc_images_quantiles(images, quantiles):
    quantile_images = [image.astype(np.float32) for image in np.quantile(images, quantiles, 0)]
    return quantile_images

calc_output_properties(sims, transform_key, z_scale=None)

Source code in src\image\util.py
def calc_output_properties(sims, transform_key, z_scale=None):
    output_spacing = si_utils.get_spacing_from_sim(sims[0])
    if z_scale is not None:
        output_spacing['z'] = z_scale
    output_properties = fusion.calc_fusion_stack_properties(
        sims,
        [si_utils.get_affine_from_sim(sim, transform_key) for sim in sims],
        output_spacing,
        mode='union',
    )
    return output_properties

calc_pyramid(xyzct, npyramid_add=0, pyramid_downsample=2, volumetric_resize=False)

Source code in src\image\util.py
def calc_pyramid(xyzct: tuple, npyramid_add: int = 0, pyramid_downsample: float = 2,
                 volumetric_resize: bool = False) -> list:
    x, y, z, c, t = xyzct
    if volumetric_resize and z > 1:
        size = (x, y, z)
    else:
        size = (x, y)
    sizes_add = []
    scale = 1
    for _ in range(npyramid_add):
        scale /= pyramid_downsample
        scaled_size = np.maximum(np.round(np.multiply(size, scale)).astype(int), 1)
        sizes_add.append(scaled_size)
    return sizes_add
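
For example, requesting three extra pyramid levels for a 4096 x 3072 x 1 (x, y, z) image halves the (x, y) size at each level:

calc_pyramid((4096, 3072, 1, 1, 1), npyramid_add=3)
# -> [array([2048, 1536]), array([1024, 768]), array([512, 384])]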

check_round_significants(a, significant_digits)

Source code in src\util.py
def check_round_significants(a: float, significant_digits: int) -> float:
    rounded = round_significants(a, significant_digits)
    if a != 0:
        dif = 1 - rounded / a
    else:
        dif = rounded - a
    if abs(dif) < 10 ** -significant_digits:
        return rounded
    return a

color_image(image)

Source code in src\image\util.py
def color_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 1:
        return cv.cvtColor(np.array(image), cv.COLOR_GRAY2RGB)
    else:
        return image

combine_transforms(transforms)

Source code in src\image\util.py
def combine_transforms(transforms):
    combined_transform = None
    for transform in transforms:
        if combined_transform is None:
            combined_transform = transform
        else:
            combined_transform = np.dot(transform, combined_transform)
    return combined_transform
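
Each subsequent transform left-multiplies the running product, so the list is applied in order. A small check, assuming 3x3 homogeneous 2D matrices:

import numpy as np

translate = np.array([[1, 0, 10], [0, 1, 0], [0, 0, 1]])
rotate90 = np.array([[0, -1, 0], [1, 0, 0], [0, 0, 1]])
combined = combine_transforms([translate, rotate90])   # equals rotate90 @ translate
combined @ [0, 0, 1]   # -> [0, 10, 1]: translated first, then rotated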

convert_image_sign_type(image, target_dtype)

Source code in src\image\util.py
def convert_image_sign_type(image: np.ndarray, target_dtype: np.dtype) -> np.ndarray:
    source_dtype = image.dtype
    if source_dtype.kind == target_dtype.kind:
        new_image = image
    elif source_dtype.kind == 'i':
        new_image = ensure_unsigned_image(image)
    else:
        # conversion without overhead
        offset = 2 ** (8 * target_dtype.itemsize - 1)
        new_image = (image - offset).astype(target_dtype)
    return new_image

convert_rational_value(value)

Source code in src\util.py
def convert_rational_value(value) -> float:
    if value is not None and isinstance(value, tuple):
        if value[0] == value[1]:
            value = value[0]
        else:
            value = value[0] / value[1]
    return value

convert_to_um(value, unit)

Source code in src\util.py
def convert_to_um(value, unit):
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    return value * conversions.get(unit, 1)
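
Unknown units fall back to a conversion factor of 1:

convert_to_um(0.65, 'mm')    # -> 650.0
convert_to_um(250, 'nm')     # -> 0.25
convert_to_um(1.0, 'pixel')  # -> 1.0 (unit not recognised, passed through)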

create_compression_filter(compression)

Source code in src\image\util.py
def create_compression_filter(compression: list) -> tuple:
    compressor, compression_filters = None, None
    compression = ensure_list(compression)
    if compression is not None and len(compression) > 0:
        compression_type = compression[0].lower()
        if len(compression) > 1:
            level = int(compression[1])
        else:
            level = None
        if 'lzw' in compression_type:
            from imagecodecs.numcodecs import Lzw
            compression_filters = [Lzw()]
        elif '2k' in compression_type or '2000' in compression_type:
            from imagecodecs.numcodecs import Jpeg2k
            compression_filters = [Jpeg2k(level=level)]
        elif 'jpegls' in compression_type:
            from imagecodecs.numcodecs import Jpegls
            compression_filters = [Jpegls(level=level)]
        elif 'jpegxr' in compression_type:
            from imagecodecs.numcodecs import Jpegxr
            compression_filters = [Jpegxr(level=level)]
        elif 'jpegxl' in compression_type:
            from imagecodecs.numcodecs import Jpegxl
            compression_filters = [Jpegxl(level=level)]
        else:
            compressor = compression
    return compressor, compression_filters

create_transform(center, angle, matrix_size=3)

Source code in src\util.py
def create_transform(center, angle, matrix_size=3):
    if isinstance(center, dict):
        center = dict_to_xyz(center)
    if len(center) == 2:
        center = np.array(list(center) + [0])
    if angle is None:
        angle = 0
    r = Rotation.from_euler('z', angle, degrees=True)
    t = center - r.apply(center, inverse=True)
    transform = np.eye(matrix_size)
    transform[:3, :3] = np.transpose(r.as_matrix())
    transform[:3, -1] += t
    return transform

create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0))

Source code in src\util.py
def create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0)):
    transform = cv.getRotationMatrix2D(center[:2], angle, scale)
    transform[:, 2] += translate
    if len(transform) == 2:
        transform = np.vstack([transform, [0, 0, 1]])   # create 3x3 matrix
    return transform

desc_to_dict(desc)

Source code in src\util.py
def desc_to_dict(desc: str) -> dict:
    desc_dict = {}
    if desc.startswith('{'):
        try:
            metadata = ast.literal_eval(desc)
            return metadata
        except:
            pass
    for item in re.split(r'[\r\n\t|]', desc):
        item_sep = '='
        if ':' in item:
            item_sep = ':'
        if item_sep in item:
            items = item.split(item_sep)
            key = items[0].strip()
            value = items[1].strip()
            for dtype in (int, float, bool):
                try:
                    value = dtype(value)
                    break
                except:
                    pass
            desc_dict[key] = value
    return desc_dict
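
Descriptions that are dict literals are parsed wholesale via ast.literal_eval; otherwise items are split on newlines, tabs and '|' and values are coerced to numeric types where possible:

desc_to_dict('{"spacing": 0.25, "unit": "micron"}')   # parsed as a dict literal
desc_to_dict('width=512|height=256')                  # -> {'width': 512, 'height': 256}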

detect_area_points(image)

Source code in src\image\util.py
def detect_area_points(image):
    method = cv.THRESH_OTSU
    threshold = -5
    contours = []
    while len(contours) <= 1 and threshold <= 255:
        _, binimage = cv.threshold(np.array(uint8_image(image)), threshold, 255, method)
        contours0 = cv.findContours(binimage, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        contours = contours0[0] if len(contours0) == 2 else contours0[1]
        method = cv.THRESH_BINARY
        threshold += 5
    area_contours = [(contour, cv.contourArea(contour)) for contour in contours]
    area_contours.sort(key=lambda contour_area: contour_area[1], reverse=True)
    min_area = max(np.mean([area for contour, area in area_contours]), 1)
    area_points = [(get_center(contour), area) for contour, area in area_contours if area > min_area]

    #image = cv.cvtColor(image, cv.COLOR_GRAY2BGR)
    #for point in area_points:
    #    radius = int(np.round(np.sqrt(point[1]/np.pi)))
    #    cv.circle(image, tuple(np.round(point[0]).astype(int)), radius, (255, 0, 0), -1)
    #show_image(image)
    return area_points

dict_to_xyz(dct, keys='xyz')

Source code in src\util.py
def dict_to_xyz(dct, keys='xyz'):
    return [dct[key] for key in keys if key in dct]

dir_regex(pattern)

Source code in src\util.py
def dir_regex(pattern):
    files = []
    for pattern_item in ensure_list(pattern):
        files.extend(glob.glob(pattern_item, recursive=True))
    files_sorted = sorted(files, key=lambda file: find_all_numbers(get_filetitle(file)))
    return files_sorted

draw_edge_filter(bounds)

Source code in src\util.py
def draw_edge_filter(bounds):
    out_image = np.zeros(np.flip(bounds))
    y, x = np.where(out_image == 0)
    points = np.transpose([x, y])

    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) * 10, 0, 1)
    return position_weights.reshape(np.flip(bounds))

draw_keypoints(image, points, color=(255, 0, 0))

Source code in src\image\util.py
def draw_keypoints(image, points, color=(255, 0, 0)):
    out_image = color_image(float2int_image(image))
    for point in points:
        point = np.round(point).astype(int)
        cv.drawMarker(out_image, tuple(point), color=color, markerType=cv.MARKER_CROSS, markerSize=5, thickness=1)
    return out_image

draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None, points_color='black', match_color='red', inlier_color='lime', show_plot=True, output_filename=None)

Source code in src\image\util.py
def draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None,
                           points_color='black', match_color='red', inlier_color='lime',
                           show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape = np.max([image.shape for image in [image1, image2]], axis=0)
    shape_y, shape_x = shape[:2]
    if shape_x > 2 * shape_y:
        merge_axis = 0
        offset2 = [shape_y, 0]
    else:
        merge_axis = 1
        offset2 = [0, shape_x]
    image = np.concatenate([
        np.pad(image1, ((0, shape[0] - image1.shape[0]), (0, shape[1] - image1.shape[1]))),
        np.pad(image2, ((0, shape[0] - image2.shape[0]), (0, shape[1] - image2.shape[1])))
    ], axis=merge_axis)
    ax.imshow(image, cmap='gray')

    ax.scatter(
        points1[:, 1],
        points1[:, 0],
        facecolors='none',
        edgecolors=points_color,
    )
    ax.scatter(
        points2[:, 1] + offset2[1],
        points2[:, 0] + offset2[0],
        facecolors='none',
        edgecolors=points_color,
    )

    for i, match in enumerate(matches):
        color = match_color
        if i < len(inliers) and inliers[i]:
            color = inlier_color
        index1, index2 = match
        ax.plot(
            (points1[index1, 1], points2[index2, 1] + offset2[1]),
            (points1[index1, 0], points2[index2, 0] + offset2[0]),
            '-', linewidth=1, alpha=0.5, color=color,
        )

    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

    return fig, ax

draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None, color=(255, 0, 0), inlier_color=(0, 255, 0), radius=15, thickness=2)

Source code in src\image\util.py
def draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None,
                              color=(255, 0, 0), inlier_color=(0, 255, 0), radius=15, thickness=2):
    # based on https://gist.github.com/woolpeeker/d7e1821e1b5c556b32aafe10b7a1b7e8
    image1 = uint8_image(image1)
    image2 = uint8_image(image2)
    # We're drawing them side by side.  Get dimensions accordingly.
    new_shape = (max(image1.shape[0], image2.shape[0]), image1.shape[1] + image2.shape[1], 3)
    out_image = np.zeros(new_shape, image1.dtype)
    # Place images onto the new image.
    out_image[0:image1.shape[0], 0:image1.shape[1]] = color_image(image1)
    out_image[0:image2.shape[0], image1.shape[1]:image1.shape[1] + image2.shape[1]] = color_image(image2)

    if matches is not None:
        # Draw lines between matches.  Make sure to offset kp coords in second image appropriately.
        for index, match in enumerate(matches):
            if inliers is not None and inliers[index]:
                line_color = inlier_color
            else:
                line_color = color
            # So the keypoint locs are stored as a tuple of floats.  cv2.line() wants locs as a tuple of ints.
            end1 = tuple(np.round(points1[match[0]]).astype(int))
            end2 = tuple(np.round(points2[match[1]]).astype(int) + np.array([image1.shape[1], 0]))
            cv.line(out_image, end1, end2, line_color, thickness)
            cv.circle(out_image, end1, radius, line_color, thickness)
            cv.circle(out_image, end2, radius, line_color, thickness)
    else:
        # Draw all points if no matches are provided.
        for point in points1:
            point = tuple(np.round(point).astype(int))
            cv.circle(out_image, point, radius, color, thickness)
        for point in points2:
            point = tuple(np.round(point).astype(int) + np.array([image1.shape[1], 0]))
            cv.circle(out_image, point, radius, color, thickness)
    return out_image

draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None, show_plot=True, output_filename=None)

Source code in src\image\util.py
def draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None,
                              show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape_y, shape_x = image1.shape[:2]
    if shape_x > 2 * shape_y:
        alignment = 'vertical'
    else:
        alignment = 'horizontal'
    plot_matched_features(
        image1,
        image2,
        keypoints0=points1,
        keypoints1=points2,
        matches=matches,
        ax=ax,
        alignment=alignment,
        only_matches=True,
    )
    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

ensure_list(x)

Source code in src\util.py
def ensure_list(x) -> list:
    if x is None:
        return []
    elif isinstance(x, list):
        return x
    else:
        return [x]

ensure_unsigned_image(image)

Source code in src\image\util.py
def ensure_unsigned_image(image: np.ndarray) -> np.ndarray:
    source_dtype = image.dtype
    dtype = ensure_unsigned_type(source_dtype)
    if dtype != source_dtype:
        # conversion without overhead
        offset = 2 ** (8 * dtype.itemsize - 1)
        new_image = image.astype(dtype) + offset
    else:
        new_image = image
    return new_image

ensure_unsigned_type(dtype)

Source code in src\image\util.py
def ensure_unsigned_type(dtype: np.dtype) -> np.dtype:
    new_dtype = dtype
    if dtype.kind == 'i' or dtype.byteorder == '>' or dtype.byteorder == '<':
        new_dtype = np.dtype(f'u{dtype.itemsize}')
    return new_dtype

eval_context(data, key, default_value, context)

Source code in src\util.py
def eval_context(data, key, default_value, context):
    value = data.get(key, default_value)
    if isinstance(value, str):
        try:
            value = value.format_map(context)
        except:
            pass
        try:
            value = eval(value, context)
        except:
            pass
    return value

export_csv(filename, data, header=None)

Source code in src\util.py
def export_csv(filename, data, header=None):
    with open(filename, 'w', encoding='utf8', newline='') as file:
        csvwriter = csv.writer(file)
        if header is not None:
            csvwriter.writerow(header)
        for row in data:
            csvwriter.writerow(row)

export_json(filename, data)

Source code in src\util.py
def export_json(filename, data):
    with open(filename, 'w', encoding='utf8') as file:
        json.dump(data, file, indent=4)

filter_dict(dict0)

Source code in src\util.py
def filter_dict(dict0: dict) -> dict:
    new_dict = {}
    for key, value0 in dict0.items():
        if value0 is not None:
            values = []
            for value in ensure_list(value0):
                if isinstance(value, dict):
                    value = filter_dict(value)
                values.append(value)
            if len(values) == 1:
                values = values[0]
            new_dict[key] = values
    return new_dict

filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5)

Source code in src\util.py
def filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5):
    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) / filter_factor, 0, 1)
    order_weights = 1 - np.array(range(len(points))) / len(points) / 2
    weights = position_weights * order_weights
    return weights > threshold

filter_noise_images(images)

Source code in src\image\util.py
def filter_noise_images(images):
    dtype = images[0].dtype
    maxval = 2 ** (8 * dtype.itemsize) - 1
    image_vars = [np.asarray(np.std(image)).item() for image in images]
    threshold, mask0 = cv.threshold(np.array(image_vars).astype(dtype), 0, maxval, cv.THRESH_OTSU)
    mask = [flag.item() for flag in mask0.astype(bool)]
    return int(threshold), mask

find_all_numbers(text)

Source code in src\util.py
def find_all_numbers(text: str) -> list:
    return list(map(int, re.findall(r'\d+', text)))

float2int_image(image, target_dtype=np.dtype(np.uint8))

Source code in src\image\util.py
def float2int_image(image, target_dtype=np.dtype(np.uint8)):
    source_dtype = image.dtype
    if source_dtype.kind not in ('i', 'u') and not target_dtype.kind == 'f':
        maxval = 2 ** (8 * target_dtype.itemsize) - 1
        return (image * maxval).astype(target_dtype)
    else:
        return image

get_center(data, offset=(0, 0))

Source code in src\util.py
def get_center(data, offset=(0, 0)):
    moments = get_moments(data, offset=offset)
    if moments['m00'] != 0:
        center = get_moments_center(moments)
    else:
        center = np.mean(data, 0).flatten()  # close approximation
    return center.astype(np.float32)

get_center_from_transform(transform)

Source code in src\util.py
def get_center_from_transform(transform):
    # from opencv:
    # t0 = (1-alpha) * cx - beta * cy
    # t1 = beta * cx + (1-alpha) * cy
    # where
    # alpha = cos(angle) * scale
    # beta = sin(angle) * scale
    # isolate cx and cy:
    t0, t1 = transform[:2, 2]
    scale = 1
    angle = np.arctan2(transform[0][1], transform[0][0])
    alpha = np.cos(angle) * scale
    beta = np.sin(angle) * scale
    cx = (t1 + t0 * (1 - alpha) / beta) / (beta + (1 - alpha) ** 2 / beta)
    cy = ((1 - alpha) * cx - t0) / beta
    return cx, cy

get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None)

Source code in src\image\util.py
def get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None):
    if rotation is None:
        rotation = 0

    if isinstance(data, DataTree):
        sim = msi_utils.get_sim_from_msim(data)
    else:
        sim = data
    sdims = ''.join(si_utils.get_spatial_dims_from_sim(sim))
    sdims = sdims.replace('zyx', 'xyz').replace('yx', 'xy')   # order xy(z)
    origin = si_utils.get_origin_from_sim(sim)
    translation = [origin[sdim] for sdim in sdims]

    if len(translation) == 0:
        translation = [0, 0]
    if len(translation) == 2:
        if translation0 is not None and len(translation0) == 3:
            z = translation0[2]
        else:
            z = 0
        translation = list(translation) + [z]

    if transform is not None:
        translation1, rotation1, _ = get_properties_from_transform(transform, invert=True)
        translation = np.array(translation) + translation1
        rotation += rotation1

    if transform_key is not None:
        transform1 = sim.transforms[transform_key]
        translation1, rotation1, _ = get_properties_from_transform(transform1, invert=True)
        rotation += rotation1

    return translation, rotation

get_default(x, default)

Source code in src\util.py
def get_default(x, default):
    return default if x is None else x

get_filetitle(filename)

Source code in src\util.py
def get_filetitle(filename: str) -> str:
    filebase = os.path.basename(filename)
    title = os.path.splitext(filebase)[0].rstrip('.ome')
    return title
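
Both the file extension and a trailing '.ome' are removed:

get_filetitle(r'data/tiles/scan1.ome.tiff')   # -> 'scan1'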

get_image_quantile(image, quantile, axis=None)

Source code in src\image\util.py
def get_image_quantile(image: np.ndarray, quantile: float, axis=None) -> float:
    value = np.quantile(image, quantile, axis=axis).astype(image.dtype)
    return value

get_image_size_info(sizes_xyzct, pixel_nbytes, pixel_type, channels)

Source code in src\image\util.py
def get_image_size_info(sizes_xyzct: list, pixel_nbytes: int, pixel_type: np.dtype, channels: list) -> str:
    image_size_info = 'XYZCT:'
    size = 0
    for i, size_xyzct in enumerate(sizes_xyzct):
        w, h, zs, cs, ts = size_xyzct
        size += np.int64(pixel_nbytes) * w * h * zs * cs * ts
        if i > 0:
            image_size_info += ','
        image_size_info += f' {w} {h} {zs} {cs} {ts}'
    image_size_info += f' Pixel type: {pixel_type} Uncompressed: {print_hbytes(size)}'
    if sizes_xyzct[0][3] == 3:
        channel_info = 'rgb'
    else:
        channel_info = ','.join([channel.get('Name', '') for channel in channels])
    if channel_info != '':
        image_size_info += f' Channels: {channel_info}'
    return image_size_info

get_image_window(image, low=0.01, high=0.99)

Source code in src\image\util.py
def get_image_window(image, low=0.01, high=0.99):
    window = (
        get_image_quantile(image, low),
        get_image_quantile(image, high)
    )
    return window

get_max_downsamples(shape, npyramid_add, pyramid_downsample)

Source code in src\image\util.py
def get_max_downsamples(shape, npyramid_add, pyramid_downsample):
    shape = list(shape)
    for i in range(npyramid_add):
        shape[-1] //= pyramid_downsample
        shape[-2] //= pyramid_downsample
        if shape[-1] < 1 or shape[-2] < 1:
            return i
    return npyramid_add
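
For example, a 1024 x 1024 plane supports at most 10 halvings before a dimension drops below one pixel:

get_max_downsamples((3, 1024, 1024), npyramid_add=12, pyramid_downsample=2)   # -> 10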

get_mean_nn_distance(points1, points2)

Source code in src\util.py
def get_mean_nn_distance(points1, points2):
    return np.mean([get_nn_distance(points1), get_nn_distance(points2)])

get_moments(data, offset=(0, 0))

Source code in src\util.py
def get_moments(data, offset=(0, 0)):
    moments = cv.moments((np.array(data) + offset).astype(np.float32))    # doesn't work for float64!
    return moments

get_moments_center(moments, offset=(0, 0))

Source code in src\util.py
def get_moments_center(moments, offset=(0, 0)):
    return np.array([moments['m10'], moments['m01']]) / moments['m00'] + np.array(offset)

get_nn_distance(points0)

Source code in src\util.py
def get_nn_distance(points0):
    points = list(set(map(tuple, points0)))     # get unique points
    if len(points) >= 2:
        tree = KDTree(points, leaf_size=2)
        dist, ind = tree.query(points, k=2)
        nn_distance = np.median(dist[:, 1])
    else:
        nn_distance = 1
    return nn_distance

get_numpy_slicing(dimension_order, **slicing)

Source code in src\image\util.py
def get_numpy_slicing(dimension_order, **slicing):
    slices = []
    for axis in dimension_order:
        index = slicing.get(axis)
        index0 = slicing.get(axis + '0')
        index1 = slicing.get(axis + '1')
        if index0 is not None and index1 is not None:
            slice1 = slice(int(index0), int(index1))
        elif index is not None:
            slice1 = int(index)
        else:
            slice1 = slice(None)
        slices.append(slice1)
    return tuple(slices)
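
Fixed indices, ranges (passed as '&lt;axis&gt;0'/'&lt;axis&gt;1' keywords) and omitted axes can be mixed freely:

get_numpy_slicing('tzyx', t=0, z0=2, z1=5)
# -> (0, slice(2, 5), slice(None, None, None), slice(None, None, None))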

get_orthogonal_pairs(origins, image_size_um)

Get pairs of orthogonal neighbors from a list of tiles. Tiles don't have to be placed on a regular grid.

Source code in src\util.py
def get_orthogonal_pairs(origins, image_size_um):
    """
    Get pairs of orthogonal neighbors from a list of tiles.
    Tiles don't have to be placed on a regular grid.
    """
    pairs = []
    angles = []
    z_positions = [pos[0] for pos in origins if len(pos) == 3]
    ordered_z = sorted(set(z_positions))
    is_mixed_3dstack = len(ordered_z) < len(z_positions)
    for i, j in np.transpose(np.triu_indices(len(origins), 1)):
        origini = np.array(origins[i])
        originj = np.array(origins[j])
        if is_mixed_3dstack:
            # ignore z value for distance
            distance = math.dist(origini[-2:], originj[-2:])
            min_distance = max(image_size_um[-2:])
            z_i, z_j = origini[0], originj[0]
            is_same_z = (z_i == z_j)
            is_close_z = abs(ordered_z.index(z_i) - ordered_z.index(z_j)) <= 1
            if not is_same_z:
                # for tiles in different z stack, require greater overlap
                min_distance *= 0.8
            ok = (distance < min_distance and is_close_z)
        else:
            distance = math.dist(origini, originj)
            min_distance = max(image_size_um)
            ok = (distance < min_distance)
        if ok:
            pairs.append((i, j))
            vector = origini - originj
            angle = math.degrees(math.atan2(vector[1], vector[0]))
            if distance < min(image_size_um):
                angle += 90
            while angle < -90:
                angle += 180
            while angle > 90:
                angle -= 180
            angles.append(angle)
    return pairs, angles
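
For example, four 100 x 100 µm tiles on a 2 x 2 grid with 10% overlap pair up only with their direct neighbors; the diagonal distances exceed the tile size:

origins = [(0, 0), (90, 0), (0, 90), (90, 90)]
pairs, angles = get_orthogonal_pairs(origins, (100, 100))
# pairs -> (0, 1), (0, 2), (1, 3), (2, 3)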

get_properties_from_transform(transform, invert=False)

Source code in src\image\util.py
def get_properties_from_transform(transform, invert=False):
    if len(transform.shape) == 3:
        transform = transform[0]
    if invert:
        transform = param_utils.invert_coordinate_order(transform)
    transform = np.array(transform)
    translation = param_utils.translation_from_affine(transform)
    if len(translation) == 2:
        translation = list(translation) + [0]
    rotation = get_rotation_from_transform(transform)
    scale = get_scale_from_transform(transform)
    return translation, rotation, scale

get_rotation_from_transform(transform)

Source code in src\util.py
def get_rotation_from_transform(transform):
    rotation = np.rad2deg(np.arctan2(transform[0][1], transform[0][0]))
    return rotation

get_scale_from_transform(transform)

Source code in src\util.py
def get_scale_from_transform(transform):
    scale = np.mean(np.linalg.norm(transform, axis=0)[:-1])
    return scale

get_sim_physical_size(sim, invert=False)

Source code in src\image\util.py
def get_sim_physical_size(sim, invert=False):
    size = si_utils.get_shape_from_sim(sim, asarray=True) * si_utils.get_spacing_from_sim(sim, asarray=True)
    if invert:
        size = np.flip(size)
    return size

get_sim_position_final(sim)

Source code in src\image\util.py
def get_sim_position_final(sim):
    transform_keys = si_utils.get_tranform_keys_from_sim(sim)
    transform = combine_transforms([np.array(si_utils.get_affine_from_sim(sim, transform_key))
                                    for transform_key in transform_keys])
    position = apply_transform([si_utils.get_origin_from_sim(sim, asarray=True)], transform)[0]
    return position

get_sim_shape_2d(sim, transform_key=None)

Source code in src\image\util.py
def get_sim_shape_2d(sim, transform_key=None):
    if 't' in sim.coords.xindexes:
        # work-around for points error in get_overlap_bboxes()
        sim1 = si_utils.sim_sel_coords(sim, {'t': 0})
    else:
        sim1 = sim
    stack_props = si_utils.get_stack_properties_from_sim(sim1, transform_key=transform_key)
    vertices = mv_graph.get_vertices_from_stack_props(stack_props)
    if vertices.shape[1] == 3:
        # remove z coordinate
        vertices = vertices[:, 1:]
    if len(vertices) >= 8:
        # remove redundant x/y vertices
        vertices = vertices[:4]
    if len(vertices) >= 4:
        # last 2 vertices appear to be swapped
        vertices[2:] = np.array(list(reversed(vertices[2:])))
    return vertices

get_translation_from_transform(transform)

Source code in src\util.py
def get_translation_from_transform(transform):
    ndim = len(transform) - 1
    #translation = transform[:ndim, ndim]
    translation = apply_transform([[0] * ndim], transform)[0]
    return translation

get_unique_file_labels(filenames)

Source code in src\util.py
def get_unique_file_labels(filenames: list) -> list:
    file_labels = []
    file_parts = []
    label_indices = set()
    last_parts = None
    for filename in filenames:
        parts = split_numeric(filename)
        if len(parts) == 0:
            parts = split_numeric(filename)
            if len(parts) == 0:
                parts = filename
        file_parts.append(parts)
        if last_parts is not None:
            for parti, (part1, part2) in enumerate(zip(last_parts, parts)):
                if part1 != part2:
                    label_indices.add(parti)
        last_parts = parts
    label_indices = sorted(list(label_indices))

    for file_part in file_parts:
        file_label = '_'.join([file_part[i] for i in label_indices])
        file_labels.append(file_label)

    if len(set(file_labels)) < len(file_labels):
        # fallback for duplicate labels
        file_labels = [get_filetitle(filename) for filename in filenames]

    return file_labels

get_value_units_micrometer(value_units0)

Source code in src\util.py
def get_value_units_micrometer(value_units0: list|dict) -> list|dict|None:
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    if value_units0 is None:
        return None

    if isinstance(value_units0, dict):
        values_um = {}
        for dim, value_unit in value_units0.items():
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um[dim] = value_um
    else:
        values_um = []
        for value_unit in value_units0:
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um.append(value_um)
    return values_um

grayscale_image(image)

Source code in src\image\util.py
def grayscale_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 4:
        return cv.cvtColor(image, cv.COLOR_RGBA2GRAY)
    elif nchannels > 1:
        return cv.cvtColor(image, cv.COLOR_RGB2GRAY)
    else:
        return image

group_sims_by_z(sims)

Source code in src\image\util.py
def group_sims_by_z(sims):
    grouped_sims = []
    z_positions = [si_utils.get_origin_from_sim(sim).get('z') for sim in sims]
    is_mixed_3dstack = len(set(z_positions)) < len(z_positions)
    if is_mixed_3dstack:
        sims_by_z = {}
        for simi, z_pos in enumerate(z_positions):
            if z_pos is not None and z_pos not in sims_by_z:
                sims_by_z[z_pos] = []
            sims_by_z[z_pos].append(simi)
        grouped_sims = list(sims_by_z.values())
    if len(grouped_sims) == 0:
        grouped_sims = [list(range(len(sims)))]
    return grouped_sims

image_reshape(image, target_size)

Source code in src\image\util.py
def image_reshape(image: np.ndarray, target_size: tuple) -> np.ndarray:
    tw, th = target_size
    sh, sw = image.shape[0:2]
    if sw < tw or sh < th:
        dw = max(tw - sw, 0)
        dh = max(th - sh, 0)
        padding = [(dh // 2, dh - dh // 2), (dw // 2, dw - dw // 2)]
        if len(image.shape) == 3:
            padding += [(0, 0)]
        image = np.pad(image, padding, mode='constant', constant_values=(0, 0))
    if tw < sw or th < sh:
        image = image[0:th, 0:tw]
    return image

image_resize(image, target_size0, dimension_order='yxc')

Source code in src\image\util.py
def image_resize(image: np.ndarray, target_size0: tuple, dimension_order: str = 'yxc') -> np.ndarray:
    shape = image.shape
    x_index = dimension_order.index('x')
    y_index = dimension_order.index('y')
    c_is_at_end = ('c' in dimension_order and dimension_order.endswith('c'))
    size = shape[x_index], shape[y_index]
    if np.mean(np.divide(size, target_size0)) < 1:
        interpolation = cv.INTER_CUBIC
    else:
        interpolation = cv.INTER_AREA
    dtype0 = image.dtype
    image = ensure_unsigned_image(image)
    target_size = tuple(np.maximum(np.round(target_size0).astype(int), 1))
    if dimension_order in ['yxc', 'yx']:
        new_image = cv.resize(np.asarray(image), target_size, interpolation=interpolation)
    elif dimension_order == 'cyx':
        new_image = np.moveaxis(image, 0, -1)
        new_image = cv.resize(np.asarray(new_image), target_size, interpolation=interpolation)
        new_image = np.moveaxis(new_image, -1, 0)
    else:
        ts = image.shape[dimension_order.index('t')] if 't' in dimension_order else 1
        zs = image.shape[dimension_order.index('z')] if 'z' in dimension_order else 1
        target_shape = list(image.shape).copy()
        target_shape[x_index] = target_size[0]
        target_shape[y_index] = target_size[1]
        new_image = np.zeros(target_shape, dtype=image.dtype)
        for t in range(ts):
            for z in range(zs):
                slices = get_numpy_slicing(dimension_order, z=z, t=t)
                image1 = image[slices]
                if not c_is_at_end:
                    image1 = np.moveaxis(image1, 0, -1)
                new_image1 = np.atleast_3d(cv.resize(np.asarray(image1), target_size, interpolation=interpolation))
                if not c_is_at_end:
                    new_image1 = np.moveaxis(new_image1, -1, 0)
                new_image[slices] = new_image1
    new_image = convert_image_sign_type(new_image, dtype0)
    return new_image

import_csv(filename)

Source code in src\util.py
def import_csv(filename):
    with open(filename, encoding='utf8') as file:
        data = csv.reader(file)
    return data

import_json(filename)

Source code in src\util.py
def import_json(filename):
    with open(filename, encoding='utf8') as file:
        data = json.load(file)
    return data

import_metadata(content, fields=None, input_path=None)

Source code in src\util.py
def import_metadata(content, fields=None, input_path=None):
    # return dict[id] = {values}
    if isinstance(content, str):
        ext = os.path.splitext(content)[1].lower()
        if input_path:
            if isinstance(input_path, list):
                input_path = input_path[0]
            content = os.path.normpath(os.path.join(os.path.dirname(input_path), content))
        if ext == '.csv':
            content = import_csv(content)
        elif ext in ['.json', '.ome.json']:
            content = import_json(content)
    if fields is not None:
        content = [[data[field] for field in fields] for data in content]
    return content

int2float_image(image)

Source code in src\image\util.py
def int2float_image(image):
    source_dtype = image.dtype
    if not source_dtype.kind == 'f':
        maxval = 2 ** (8 * source_dtype.itemsize) - 1
        return image / np.float32(maxval)
    else:
        return image

norm_image_quantiles(image0, quantile=0.99)

Source code in src\image\util.py
def norm_image_quantiles(image0, quantile=0.99):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    min_value = np.quantile(image, 1 - quantile)
    max_value = np.quantile(image, quantile)
    normimage = (image - np.mean(image)) / (max_value - min_value)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

norm_image_variance(image0)

Source code in src\image\util.py
def norm_image_variance(image0):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    normimage = (image - np.mean(image)) / np.std(image)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

normalise(sims, transform_key, use_global=True)

Source code in src\image\util.py
def normalise(sims, transform_key, use_global=True):
    new_sims = []
    dtype = sims[0].dtype
    # global mean and stddev
    if use_global:
        mins = []
        ranges = []
        for sim in sims:
            min = np.mean(sim, dtype=np.float32)
            range = np.std(sim, dtype=np.float32)
            #min, max = get_image_window(sim, low=0.01, high=0.99)
            #range = max - min
            mins.append(min)
            ranges.append(range)
        min = np.mean(mins)
        range = np.mean(ranges)
    else:
        min = 0
        range = 1
    # normalise all images
    for sim in sims:
        if not use_global:
            min = np.mean(sim, dtype=np.float32)
            range = np.std(sim, dtype=np.float32)
        image = (sim - min) / range
        image = float2int_image(image.clip(0, 1), dtype)    # np.clip(image) is not dask-compatible, use image.clip() instead
        new_sim = si_utils.get_sim_from_array(
            image,
            dims=sim.dims,
            scale=si_utils.get_spacing_from_sim(sim),
            translation=si_utils.get_origin_from_sim(sim),
            transform_key=transform_key,
            affine=si_utils.get_affine_from_sim(sim, transform_key),
            c_coords=sim.c.data,
            t_coords=sim.t.data
        )
        new_sims.append(new_sim)
    return new_sims

normalise_rotated_positions(positions0, rotations0, size, center)

Source code in src\util.py
def normalise_rotated_positions(positions0, rotations0, size, center):
    # in [xy(z)]
    positions = []
    rotations = []
    _, angles = get_orthogonal_pairs(positions0, size)
    for position0, rotation in zip(positions0, rotations0):
        if rotation is None and len(angles) > 0:
            rotation = -np.mean(angles)
        angle = -rotation if rotation is not None else None
        transform = create_transform(center=center, angle=angle, matrix_size=4)
        position = apply_transform([position0], transform)[0]
        positions.append(position)
        rotations.append(rotation)
    return positions, rotations

normalise_rotation(rotation)

Normalise rotation to be in the range [-180, 180].

Source code in src\util.py
def normalise_rotation(rotation):
    """
    Normalise rotation to be in the range [-180, 180].
    """
    while rotation < -180:
        rotation += 360
    while rotation > 180:
        rotation -= 360
    return rotation
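
For example:

normalise_rotation(270)    # -> -90
normalise_rotation(-450)   # -> -90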

normalise_values(image, min_value, max_value)

Source code in src\image\util.py
def normalise_values(image: np.ndarray, min_value: float, max_value: float) -> np.ndarray:
    image = (image.astype(np.float32) - min_value) / (max_value - min_value)
    return image.clip(0, 1)

pilmode_to_pixelinfo(mode)

Source code in src\image\util.py
def pilmode_to_pixelinfo(mode: str) -> tuple:
    pixelinfo = (np.uint8, 8, 1)
    mode_types = {
        'I': (np.uint32, 32, 1),
        'F': (np.float32, 32, 1),
        'RGB': (np.uint8, 24, 3),
        'RGBA': (np.uint8, 32, 4),
        'CMYK': (np.uint8, 32, 4),
        'YCbCr': (np.uint8, 24, 3),
        'LAB': (np.uint8, 24, 3),
        'HSV': (np.uint8, 24, 3),
    }
    if '16' in mode:
        pixelinfo = (np.uint16, 16, 1)
    elif '32' in mode:
        pixelinfo = (np.uint32, 32, 1)
    elif mode in mode_types:
        pixelinfo = mode_types[mode]
    pixelinfo = (np.dtype(pixelinfo[0]), pixelinfo[1])
    return pixelinfo

precise_resize(image, factors)

Source code in src\image\util.py
def precise_resize(image: np.ndarray, factors) -> np.ndarray:
    if image.ndim > len(factors):
        factors = list(factors) + [1]
    new_image = downscale_local_mean(np.asarray(image), tuple(factors)).astype(image.dtype)
    return new_image

print_dict(dct, indent=0)

Source code in src\util.py
def print_dict(dct: dict, indent: int = 0) -> str:
    s = ''
    if isinstance(dct, dict):
        for key, value in dct.items():
            s += '\n'
            if not isinstance(value, list):
                s += '\t' * indent + str(key) + ': '
            if isinstance(value, dict):
                s += print_dict(value, indent=indent + 1)
            elif isinstance(value, list):
                for v in value:
                    s += print_dict(v)
            else:
                s += str(value)
    else:
        s += str(dct)
    return s

print_hbytes(nbytes)

Source code in src\util.py
def print_hbytes(nbytes: int) -> str:
    exps = ['', 'K', 'M', 'G', 'T', 'P', 'E']
    div = 1024
    exp = 0

    while nbytes > div:
        nbytes /= div
        exp += 1
    if exp < len(exps):
        e = exps[exp]
    else:
        e = f'e{exp * 3}'
    return f'{nbytes:.1f}{e}B'
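
For example:

print_hbytes(987)          # -> '987.0B'
print_hbytes(123456789)    # -> '117.7MB'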

redimension_data(data, old_order, new_order, **indices)

Source code in src\image\util.py
def redimension_data(data, old_order, new_order, **indices):
    # able to provide optional dimension values e.g. t=0, z=0
    if new_order == old_order:
        return data

    new_data = data
    order = old_order
    # remove
    for o in old_order:
        if o not in new_order:
            index = order.index(o)
            dim_value = indices.get(o, 0)
            new_data = np.take(new_data, indices=dim_value, axis=index)
            order = order[:index] + order[index + 1:]
    # add
    for o in new_order:
        if o not in order:
            new_data = np.expand_dims(new_data, 0)
            order = o + order
    # move
    old_indices = [order.index(o) for o in new_order]
    new_indices = list(range(len(new_order)))
    new_data = np.moveaxis(new_data, old_indices, new_indices)
    return new_data
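
For example, moving the channel axis to the front, or extracting a single 2D plane from a 5D stack:

import numpy as np

frame = np.zeros((480, 640, 3))
redimension_data(frame, 'yxc', 'cyx').shape                    # -> (3, 480, 640)

stack = np.zeros((2, 3, 10, 480, 640))
redimension_data(stack, 'tczyx', 'yx', t=0, c=1, z=5).shape    # -> (480, 640)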

reorder(items, old_order, new_order, default_value=0)

Source code in src\util.py
def reorder(items: list, old_order: str, new_order: str, default_value: int = 0) -> list:
    new_items = []
    for label in new_order:
        if label in old_order:
            item = items[old_order.index(label)]
        else:
            item = default_value
        new_items.append(item)
    return new_items
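
For example, converting (y, x) values to (x, y, z) order, filling the missing z with the default:

reorder([1080, 1920], 'yx', 'xyz')   # -> [1920, 1080, 0]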

resize_image(image, new_size)

Source code in src\image\util.py
def resize_image(image, new_size):
    if not isinstance(new_size, (tuple, list, np.ndarray)):
        # use single value for width; apply aspect ratio
        size = np.flip(image.shape[:2])
        new_size = new_size, new_size * size[1] // size[0]
    return cv.resize(image, new_size)

retuple(chunks, shape)

Expand chunks to match shape.

E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028), return (3, 4, 5, 64, 64).

If chunks is an integer, it is applied to all dimensions, to match the behaviour of zarr-python.

Source code in src\util.py
def retuple(chunks, shape):
    # from ome-zarr-py
    """
    Expand chunks to match shape.

    E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028)
    return (3, 4, 5, 64, 64)

    If chunks is an integer, it is applied to all dimensions, to match
    the behaviour of zarr-python.
    """

    if isinstance(chunks, int):
        return tuple([chunks] * len(shape))

    dims_to_add = len(shape) - len(chunks)
    return *shape[:dims_to_add], *chunks
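
For example:

retuple((64, 64), (3, 4, 5, 1028, 1028))   # -> (3, 4, 5, 64, 64)
retuple(32, (5, 512, 512))                 # -> (32, 32, 32)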

round_significants(a, significant_digits)

Source code in src\util.py
def round_significants(a: float, significant_digits: int) -> float:
    if a != 0:
        round_decimals = significant_digits - int(np.floor(np.log10(abs(a)))) - 1
        return round(a, round_decimals)
    return a

show_image(image, title='', cmap=None)

Source code in src\image\util.py
def show_image(image, title='', cmap=None):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if cmap is None:
        cmap = 'gray' if nchannels == 1 else None
    plt.imshow(image, cmap=cmap)
    if title != '':
        plt.title(title)
    plt.show()

split_num_text(text)

Source code in src\util.py
def split_num_text(text: str) -> list:
    num_texts = []
    block = ''
    is_num0 = None
    if text is None:
        return None

    for c in text:
        is_num = (c.isnumeric() or c == '.')
        if is_num0 is not None and is_num != is_num0:
            num_texts.append(block)
            block = ''
        block += c
        is_num0 = is_num
    if block != '':
        num_texts.append(block)

    num_texts2 = []
    for block in num_texts:
        block = block.strip()
        try:
            block = float(block)
        except:
            pass
        if block not in [' ', ',', '|']:
            num_texts2.append(block)
    return num_texts2

split_numeric(text)

Source code in src\util.py
def split_numeric(text: str) -> list:
    num_parts = []
    parts = re.split(r'[_/\\.]', text)
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            num_parts.append(part)
    return num_parts

split_numeric_dict(text)

Source code in src\util.py
def split_numeric_dict(text: str) -> dict:
    num_parts = {}
    parts = re.split(r'[_/\\.]', text)
    parti = 0
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            index = num_span.start()
            label = part[:index]
            if label == '':
                label = parti
            num_parts[label] = num_span.group()
            parti += 1
    return num_parts

split_path(path)

Source code in src\util.py
def split_path(path: str) -> list:
    return os.path.normpath(path).split(os.path.sep)

split_value_unit_list(text)

Source code in src\util.py
def split_value_unit_list(text: str) -> list:
    value_units = []
    if text is None:
        return None

    items = split_num_text(text)
    if isinstance(items[-1], str):
        def_unit = items[-1]
    else:
        def_unit = ''

    i = 0
    while i < len(items):
        value = items[i]
        if i + 1 < len(items):
            unit = items[i + 1]
        else:
            unit = ''
        if not isinstance(value, str):
            if isinstance(unit, str):
                i += 1
            else:
                unit = def_unit
            value_units.append((value, unit))
        i += 1
    return value_units

uint8_image(image)

Source code in src\image\util.py
def uint8_image(image):
    source_dtype = image.dtype
    if source_dtype.kind == 'f':
        image = image * 255
    elif source_dtype.itemsize != 1:
        factor = 2 ** (8 * (source_dtype.itemsize - 1))
        image = image // factor
    return image.astype(np.uint8)
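
Float images are assumed to be normalised to [0, 1] and are scaled by 255; wider integer types are divided down:

import numpy as np

uint8_image(np.array([[0.0, 1.0]]))                    # -> [[0, 255]]
uint8_image(np.array([[0, 65535]], dtype=np.uint16))   # -> [[0, 255]]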

validate_transform(transform, max_rotation=None)

Source code in src\util.py
def validate_transform(transform, max_rotation=None):
    if transform is None:
        return False
    transform = np.array(transform)
    if np.any(np.isnan(transform)):
        return False
    if np.any(np.isinf(transform)):
        return False
    if np.linalg.det(transform) == 0:
        return False
    if max_rotation is not None and abs(normalise_rotation(get_rotation_from_transform(transform))) > max_rotation:
        return False
    return True
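
For example:

import numpy as np

validate_transform(np.eye(3))          # -> True
validate_transform(np.zeros((3, 3)))   # -> False (singular)
validate_transform(None)               # -> False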

xyz_to_dict(xyz, axes='xyz')

Source code in src\util.py
def xyz_to_dict(xyz, axes='xyz'):
    dct = {dim: value for dim, value in zip(axes, xyz)}
    return dct

RegistrationMethodCvFeatures

RegistrationMethodCvFeatures

Bases: RegistrationMethod

Source code in src\registration_methods\RegistrationMethodCvFeatures.py
class RegistrationMethodCvFeatures(RegistrationMethod):
    def detect_features(self, data0):
        data = data0.astype(self.source_type)

        data = uint8_image(data)
        scale = min(1000 / np.linalg.norm(data.shape), 1)
        data = cv.resize(data, (0, 0), fx=scale, fy=scale)
        feature_model = cv.SIFT_create(contrastThreshold=0.1)
        #feature_model = cv.ORB_create(patchSize=8, edgeThreshold=8)
        keypoints, desc = feature_model.detectAndCompute(data, None)
        points = [np.array(keypoint.pt) / scale for keypoint in keypoints]
        return points, desc

    def registration(self, fixed_data: SpatialImage, moving_data: SpatialImage, **kwargs) -> dict:
        fixed_points, fixed_desc = self.detect_features(fixed_data.data)
        moving_points, moving_desc = self.detect_features(moving_data.data)
        threshold = get_mean_nn_distance(fixed_points, moving_points)

        matcher = cv.BFMatcher()
        #matches0 = matcher.match(fixed_desc, moving_desc)
        matches0 = matcher.knnMatch(fixed_desc, moving_desc, k=2)

        matches = []
        for m, n in matches0:
            if m.distance < 0.92 * n.distance:
                matches.append(m)

        transform = None
        quality = 0
        if len(matches) >= 4:
            fixed_points2 = np.float32([fixed_points[match.queryIdx] for match in matches])
            moving_points2 = np.float32([moving_points[match.trainIdx] for match in matches])
            transform, inliers = cv.findHomography(fixed_points2, moving_points2,
                                                   method=cv.USAC_MAGSAC, ransacReprojThreshold=threshold)
            if transform is not None:
                fixed_points3 = [point for point, is_inlier in zip(fixed_points2, inliers) if is_inlier]
                moving_points3 = [point for point, is_inlier in zip(moving_points2, inliers) if is_inlier]
                metrics = calc_match_metrics(fixed_points3, moving_points3, transform, threshold)
                #quality = np.mean(inliers)
                quality = metrics['match_rate']

        if not validate_transform(transform):
            logging.error('Unable to find feature-based registration')
            transform = np.eye(3)

        return {
            "affine_matrix": param_utils.invert_coordinate_order(transform),  # homogenous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
            "quality": quality  # float between 0 and 1 (if not available, set to 1.0)
        }
detect_features(data0)
Source code in src\registration_methods\RegistrationMethodCvFeatures.py
def detect_features(self, data0):
    data = data0.astype(self.source_type)

    data = uint8_image(data)
    scale = min(1000 / np.linalg.norm(data.shape), 1)
    data = cv.resize(data, (0, 0), fx=scale, fy=scale)
    feature_model = cv.SIFT_create(contrastThreshold=0.1)
    #feature_model = cv.ORB_create(patchSize=8, edgeThreshold=8)
    keypoints, desc = feature_model.detectAndCompute(data, None)
    points = [np.array(keypoint.pt) / scale for keypoint in keypoints]
    return points, desc
registration(fixed_data, moving_data, **kwargs)
Source code in src\registration_methods\RegistrationMethodCvFeatures.py
def registration(self, fixed_data: SpatialImage, moving_data: SpatialImage, **kwargs) -> dict:
    fixed_points, fixed_desc = self.detect_features(fixed_data.data)
    moving_points, moving_desc = self.detect_features(moving_data.data)
    threshold = get_mean_nn_distance(fixed_points, moving_points)

    matcher = cv.BFMatcher()
    #matches0 = matcher.match(fixed_desc, moving_desc)
    matches0 = matcher.knnMatch(fixed_desc, moving_desc, k=2)

    matches = []
    for m, n in matches0:
        if m.distance < 0.92 * n.distance:
            matches.append(m)

    transform = None
    quality = 0
    if len(matches) >= 4:
        fixed_points2 = np.float32([fixed_points[match.queryIdx] for match in matches])
        moving_points2 = np.float32([moving_points[match.trainIdx] for match in matches])
        transform, inliers = cv.findHomography(fixed_points2, moving_points2,
                                               method=cv.USAC_MAGSAC, ransacReprojThreshold=threshold)
        if transform is not None:
            fixed_points3 = [point for point, is_inlier in zip(fixed_points2, inliers) if is_inlier]
            moving_points3 = [point for point, is_inlier in zip(moving_points2, inliers) if is_inlier]
            metrics = calc_match_metrics(fixed_points3, moving_points3, transform, threshold)
            #quality = np.mean(inliers)
            quality = metrics['match_rate']

    if not validate_transform(transform):
        logging.error('Unable to find feature-based registration')
        transform = np.eye(3)

    return {
        "affine_matrix": param_utils.invert_coordinate_order(transform),  # homogenous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
        "quality": quality  # float between 0 and 1 (if not available, set to 1.0)
    }
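
The method combines Lowe's ratio test (a match is kept only when the best descriptor distance is below 0.92 of the second best) with MAGSAC-based homography estimation. A self-contained sketch of the estimation step on synthetic correspondences, not the class API (the points and shift are made up for illustration):

import cv2 as cv
import numpy as np

rng = np.random.default_rng(0)
fixed_pts = rng.uniform(0, 100, (50, 2)).astype(np.float32)
moving_pts = fixed_pts + np.float32([5, -3])        # known translation
transform, inliers = cv.findHomography(fixed_pts, moving_pts,
                                       method=cv.USAC_MAGSAC, ransacReprojThreshold=3)
print(np.round(transform, 3))                       # ~identity rotation, last column ~(5, -3, 1)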

RegistrationMethodDummy

RegistrationMethodDummy

Bases: RegistrationMethod

Source code in src\registration_methods\RegistrationMethodDummy.py
class RegistrationMethodDummy(RegistrationMethod):
    def registration(self, fixed_data: SpatialImage, moving_data: SpatialImage, **kwargs) -> dict:
        transform = cv.getRotationMatrix2D((fixed_data.shape[0] // 2, fixed_data.shape[1] // 2), 38, 1)
        transform = np.vstack([transform, [0, 0, 1]])
        transform[:, 2] += [300, 25, 0]

        return {
            "affine_matrix": transform,  # homogenous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
            "quality": 1  # float between 0 and 1 (if not available, set to 1.0)
        }
registration(fixed_data, moving_data, **kwargs)
Source code in src\registration_methods\RegistrationMethodDummy.py
def registration(self, fixed_data: SpatialImage, moving_data: SpatialImage, **kwargs) -> dict:
    transform = cv.getRotationMatrix2D((fixed_data.shape[0] // 2, fixed_data.shape[1] // 2), 38, 1)
    transform = np.vstack([transform, [0, 0, 1]])
    transform[:, 2] += [300, 25, 0]

    return {
        "affine_matrix": transform,  # homogenous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
        "quality": 1  # float between 0 and 1 (if not available, set to 1.0)
    }
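
The dummy simply fabricates a fixed result: OpenCV's 2x3 rotation matrix about the tile centre is promoted to a 3x3 homogeneous matrix and given a constant translation. The same construction in isolation (the centre and offsets are made up for illustration):

import cv2 as cv
import numpy as np

transform = cv.getRotationMatrix2D((50, 50), 38, 1)   # 2x3 affine: rotate 38 degrees about (50, 50)
transform = np.vstack([transform, [0, 0, 1]])         # append homogeneous row -> 3x3
transform[:2, 2] += [300, 25]                         # add a translation to the last column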

RegistrationMethodSkFeatures

RegistrationMethodSkFeatures

Bases: RegistrationMethod

Source code in src\registration_methods\RegistrationMethodSkFeatures.py
class RegistrationMethodSkFeatures(RegistrationMethod):
    def __init__(self, source, params, debug=False):
        super().__init__(source, params, debug=debug)
        self.method = params.get('name', 'sift').lower()
        self.full_size_gaussian_sigma = params.get('gaussian_sigma', params.get('sigma', 1))
        self.downscale_factor = params.get('downscale_factor', params.get('downscale', np.sqrt(2)))
        self.nkeypoints = params.get('nkeypoints', 5000)
        self.cross_check = params.get('cross_check', True)
        self.lowe_ratio = params.get('lowe_ratio', 0.92)
        self.inlier_threshold_factor = params.get('inlier_threshold_factor', 0.05)
        self.min_matches = params.get('min_matches', 10)
        self.max_trials = params.get('max_trials', 100)
        self.ransac_iterations = params.get('ransac_iterations', 10)

        transform_type = params.get('transform_type', '').lower()
        if transform_type == 'affine':
            self.transform_type = AffineTransform
        else:
            self.transform_type = EuclideanTransform

        if transform_type in ['translation', 'translate']:
            self.max_rotation = 10  # rotation should be ~0; check <10 degrees
        else:
            self.max_rotation = None

    def detect_features(self, data0, gaussian_sigma=None):
        points = []
        desc = []

        if 'z' in data0.dims:
            # make data 2D
            data0 = data0.max('z')
        data = self.convert_data_to_float(data0)
        data = norm_image_variance(data)
        if gaussian_sigma:
            data = gaussian(data, sigma=gaussian_sigma)

        try:
            # not thread-safe - create instance that is not re-used in other thread
            if 'orb' in self.method:
                feature_model = ORB(n_keypoints=self.nkeypoints, downscale=self.downscale_factor)
            else:
                feature_model = SIFT()
            feature_model.detect_and_extract(data)
            points = feature_model.keypoints
            desc = feature_model.descriptors
            if len(points) > self.nkeypoints:
                if self.debug:
                    print('#keypoints0', len(points))
                indices = np.random.choice(len(points), self.nkeypoints, replace=False)
                points = points[indices]
                desc = desc[indices]
            if len(points) == 0:
                logging.error('No features detected!')
        except RuntimeError as e:
            logging.error(e)

        if len(points) < self.nkeypoints / 100:
            # TODO: if #points is too low: alternative feature detection?
            logging.warning(f'Low number of features: {len(points)}')

        #inliers = filter_edge_points(points, np.flip(data0.shape[:2]))
        #points = points[inliers]
        #desc = desc[inliers]

        #show_image(draw_keypoints(data, np.flip(self.feature_model.keypoints, axis=-1)))

        return points, desc, data

    def match(self, fixed_points, fixed_desc, moving_points, moving_desc,
              min_matches, cross_check, lowe_ratio, inlier_threshold, mean_size_dist):
        transform = None
        quality = 0
        inliers = []

        matches = match_descriptors(fixed_desc, moving_desc, cross_check=cross_check, max_ratio=lowe_ratio)
        if len(matches) >= min_matches:
            fixed_points2 = np.array([fixed_points[match[0]] for match in matches])
            moving_points2 = np.array([moving_points[match[1]] for match in matches])

            transforms = []
            inliers_list = []
            translations = []
            tot_weight = 0
            tot_translation = None
            for i in range(self.ransac_iterations):
                transform, inliers = ransac((fixed_points2, moving_points2), self.transform_type,
                                            min_samples=min_matches,
                                            residual_threshold=inlier_threshold,
                                            max_trials=self.max_trials)
                if inliers is None:
                    inliers = []
                if len(inliers) > 0 and validate_transform(transform, max_rotation=self.max_rotation):
                    weight = np.mean(inliers)
                    weighted_translation = transform.translation * weight
                    tot_weight += weight
                    if tot_translation is None:
                        tot_translation = weighted_translation
                    else:
                        tot_translation += weighted_translation
                    translations.append(transform.translation)
                    transforms.append(transform)
                    inliers_list.append(inliers)
                    quality += (np.sum(inliers) / self.nkeypoints) ** (1/3) # ^1/3 to decrease sensitivity

            quality /= self.ransac_iterations

            if tot_weight > 0:
                mean_translation = tot_translation / tot_weight
                best_index = np.argmin(np.linalg.norm(translations - mean_translation, axis=1))
                transform = transforms[best_index]
                inliers = inliers_list[best_index]
                quality *= 1 - np.clip(np.linalg.norm(np.std(translations, axis=0)) / mean_size_dist, 0, 1) ** 3  # ^3 to increase sensitivity
                if self.debug:
                    print('norm translation', mean_translation / mean_size_dist, 'norm SD', np.linalg.norm(np.std(translations, axis=0)) / mean_size_dist)
            if self.debug:
                print('%inliers', np.mean(inliers), '#good ransac iterations', len(inliers_list))

        return transform, quality, matches, inliers

    def registration_physical_space(
            self,
            fixed_data,
            moving_data,
            *,
            fixed_origin,
            moving_origin,
            fixed_spacing,
            moving_spacing,
            initial_affine,
            transform_types=None,
            **ants_registration_kwargs,
    ):
        return {
            "affine_matrix": np.eye(self.ndims + 1),
            # homogeneous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
            "quality": 1  # float between 0 and 1 (if not available, set to 1.0)
        }

    def registration(self, fixed_data: SpatialImage, moving_data: SpatialImage, **kwargs) -> dict:
        eye_transform = np.eye(self.ndims + 1)
        transform = eye_transform
        quality = 0
        matches = []
        inliers = []

        #print(self.count, fixed_data.name, moving_data.name)
        #self.count+=1
        #return {"affine_matrix": transform, "quality": 1}

        if np.isnan(fixed_data).all() or np.isnan(moving_data).all():
            logging.warning('No overlapping data')
            return {
                "affine_matrix": transform,  # homogenous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
                "quality": 0  # float between 0 and 1 (if not available, set to 1.0)
            }

        full_size_dist = np.linalg.norm(self.full_size)
        mean_size_dist = np.mean([np.linalg.norm(data.shape) for data in [fixed_data, moving_data]])
        scale = mean_size_dist / full_size_dist
        gaussian_sigma = self.full_size_gaussian_sigma * (scale ** (1/3))
        mean_size = np.mean([np.linalg.norm(data.shape) / np.sqrt(self.ndims) for data in [fixed_data, moving_data]])
        inlier_threshold = mean_size * self.inlier_threshold_factor

        fixed_points, fixed_desc, fixed_data2 = self.detect_features(fixed_data, gaussian_sigma)
        moving_points, moving_desc, moving_data2 = self.detect_features(moving_data, gaussian_sigma)

        if len(fixed_desc) > 0 and len(moving_desc) > 0:
            transform, quality, matches, inliers = self.match(fixed_points, fixed_desc, moving_points, moving_desc,
                                                              min_matches=self.min_matches, cross_check=self.cross_check,
                                                              lowe_ratio=self.lowe_ratio, inlier_threshold=inlier_threshold,
                                                              mean_size_dist=mean_size_dist)

            #landmark_initializer = sitk.LandmarkBasedTransformInitializerFilter()
            #landmark_initializer.SetFixedLandmarks(fixed_points2)
            #landmark_initializer.SetMovingLandmarks(moving_points2)
            #transform = sitk.Euler2DTransform()
            #output_transform = landmark_initializer.Execute(transform)
            #print(output_transform)

            transform = np.array(transform)

        if self.debug:
            print(f'#keypoints: {len(fixed_desc)},{len(moving_desc)}'
                  f' #matches: {len(matches)} #inliers: {np.sum(inliers):.0f} quality: {quality:.3f}')

            #output_filename = 'matches_' + datetime.now().strftime('%Y%m%d_%H%M%S_%f')[:-3]
            #save_tiff(output_filename + '_f.tiff', fixed_data.astype(self.source_type))
            #save_tiff(output_filename + '_m.tiff', moving_data.astype(self.source_type))

            #if np.sum(inliers) > 0:
            #    draw_keypoints_matches_sk(fixed_data2, fixed_points,
            #                              moving_data2, moving_points,
            #                              matches[inliers],
            #                              show_plot=False, output_filename=output_filename + '_i.tiff')

            #draw_keypoints_matches(fixed_data2, fixed_points,
            #                       moving_data2, moving_points,
            #                       matches, inliers,
            #                       show_plot=False, output_filename=output_filename + '.tiff')

        if quality == 0 or np.sum(inliers) == 0:
            logging.error('Unable to find feature-based registration')
            transform = eye_transform

        if len(transform) < self.ndims + 1:
            transform3d = np.eye(self.ndims + 1)  # fresh matrix; assigning eye_transform would alias and mutate it
            transform3d[1:, 1:] = transform
            transform = transform3d

        return {
            "affine_matrix": transform,  # homogenous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
            "quality": quality  # float between 0 and 1 (if not available, set to 1.0)
        }
cross_check = params.get('cross_check', True) instance-attribute
downscale_factor = params.get('downscale_factor', params.get('downscale', np.sqrt(2))) instance-attribute
full_size_gaussian_sigma = params.get('gaussian_sigma', params.get('sigma', 1)) instance-attribute
inlier_threshold_factor = params.get('inlier_threshold_factor', 0.05) instance-attribute
lowe_ratio = params.get('lowe_ratio', 0.92) instance-attribute
max_rotation = 10 instance-attribute
max_trials = params.get('max_trials', 100) instance-attribute
method = params.get('name', 'sift').lower() instance-attribute
min_matches = params.get('min_matches', 10) instance-attribute
nkeypoints = params.get('nkeypoints', 5000) instance-attribute
ransac_iterations = params.get('ransac_iterations', 10) instance-attribute
transform_type = AffineTransform instance-attribute
__init__(source, params, debug=False)
Source code in src\registration_methods\RegistrationMethodSkFeatures.py
def __init__(self, source, params, debug=False):
    super().__init__(source, params, debug=debug)
    self.method = params.get('name', 'sift').lower()
    self.full_size_gaussian_sigma = params.get('gaussian_sigma', params.get('sigma', 1))
    self.downscale_factor = params.get('downscale_factor', params.get('downscale', np.sqrt(2)))
    self.nkeypoints = params.get('nkeypoints', 5000)
    self.cross_check = params.get('cross_check', True)
    self.lowe_ratio = params.get('lowe_ratio', 0.92)
    self.inlier_threshold_factor = params.get('inlier_threshold_factor', 0.05)
    self.min_matches = params.get('min_matches', 10)
    self.max_trials = params.get('max_trials', 100)
    self.ransac_iterations = params.get('ransac_iterations', 10)

    transform_type = params.get('transform_type', '').lower()
    if transform_type == 'affine':
        self.transform_type = AffineTransform
    else:
        self.transform_type = EuclideanTransform

    if transform_type in ['translation', 'translate']:
        self.max_rotation = 10  # rotation should be ~0; check <10 degrees
    else:
        self.max_rotation = None
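
A configuration sketch for this constructor; the keys mirror the params.get calls above and the values shown are the defaults (the source argument depends on the RegistrationMethod base class and is omitted here):

sk_feature_params = {
    'name': 'sift',                     # or 'orb'
    'transform_type': 'translation',    # EuclideanTransform, plus the <10 degree rotation check
    'gaussian_sigma': 1,
    'nkeypoints': 5000,
    'cross_check': True,
    'lowe_ratio': 0.92,
    'inlier_threshold_factor': 0.05,
    'min_matches': 10,
    'max_trials': 100,
    'ransac_iterations': 10,
}
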
detect_features(data0, gaussian_sigma=None)
Source code in src\registration_methods\RegistrationMethodSkFeatures.py
def detect_features(self, data0, gaussian_sigma=None):
    points = []
    desc = []

    if 'z' in data0.dims:
        # make data 2D
        data0 = data0.max('z')
    data = self.convert_data_to_float(data0)
    data = norm_image_variance(data)
    if gaussian_sigma:
        data = gaussian(data, sigma=gaussian_sigma)

    try:
        # not thread-safe - create instance that is not re-used in other thread
        if 'orb' in self.method:
            feature_model = ORB(n_keypoints=self.nkeypoints, downscale=self.downscale_factor)
        else:
            feature_model = SIFT()
        feature_model.detect_and_extract(data)
        points = feature_model.keypoints
        desc = feature_model.descriptors
        if len(points) > self.nkeypoints:
            if self.debug:
                print('#keypoints0', len(points))
            indices = np.random.choice(len(points), self.nkeypoints, replace=False)
            points = points[indices]
            desc = desc[indices]
        if len(points) == 0:
            logging.error('No features detected!')
    except RuntimeError as e:
        logging.error(e)

    if len(points) < self.nkeypoints / 100:
        # TODO: if #points is too low: alternative feature detection?
        logging.warning(f'Low number of features: {len(points)}')

    #inliers = filter_edge_points(points, np.flip(data0.shape[:2]))
    #points = points[inliers]
    #desc = desc[inliers]

    #show_image(draw_keypoints(data, np.flip(self.feature_model.keypoints, axis=-1)))

    return points, desc, data
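
This variant uses scikit-image rather than OpenCV; a minimal usage sketch of skimage.feature.SIFT on a bundled sample image (the sample image is only for illustration):

from skimage.data import camera
from skimage.feature import SIFT

detector = SIFT()
detector.detect_and_extract(camera())
print(detector.keypoints.shape, detector.descriptors.shape)   # (N, 2) row/col points, (N, 128) descriptors
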
match(fixed_points, fixed_desc, moving_points, moving_desc, min_matches, cross_check, lowe_ratio, inlier_threshold, mean_size_dist)
Source code in src\registration_methods\RegistrationMethodSkFeatures.py
def match(self, fixed_points, fixed_desc, moving_points, moving_desc,
          min_matches, cross_check, lowe_ratio, inlier_threshold, mean_size_dist):
    transform = None
    quality = 0
    inliers = []

    matches = match_descriptors(fixed_desc, moving_desc, cross_check=cross_check, max_ratio=lowe_ratio)
    if len(matches) >= min_matches:
        fixed_points2 = np.array([fixed_points[match[0]] for match in matches])
        moving_points2 = np.array([moving_points[match[1]] for match in matches])

        transforms = []
        inliers_list = []
        translations = []
        tot_weight = 0
        tot_translation = None
        for i in range(self.ransac_iterations):
            transform, inliers = ransac((fixed_points2, moving_points2), self.transform_type,
                                        min_samples=min_matches,
                                        residual_threshold=inlier_threshold,
                                        max_trials=self.max_trials)
            if inliers is None:
                inliers = []
            if len(inliers) > 0 and validate_transform(transform, max_rotation=self.max_rotation):
                weight = np.mean(inliers)
                weighted_translation = transform.translation * weight
                tot_weight += weight
                if tot_translation is None:
                    tot_translation = weighted_translation
                else:
                    tot_translation += weighted_translation
                translations.append(transform.translation)
                transforms.append(transform)
                inliers_list.append(inliers)
                quality += (np.sum(inliers) / self.nkeypoints) ** (1/3) # ^1/3 to decrease sensitivity

        quality /= self.ransac_iterations

        if tot_weight > 0:
            mean_translation = tot_translation / tot_weight
            best_index = np.argmin(np.linalg.norm(translations - mean_translation, axis=1))
            transform = transforms[best_index]
            inliers = inliers_list[best_index]
            quality *= 1 - np.clip(np.linalg.norm(np.std(translations, axis=0)) / mean_size_dist, 0, 1) ** 3  # ^3 to increase sensitivity
            if self.debug:
                print('norm translation', mean_translation / mean_size_dist, 'norm SD', np.linalg.norm(np.std(translations, axis=0)) / mean_size_dist)
        if self.debug:
            print('%inliers', np.mean(inliers), '#good ransac iterations', len(inliers_list))

    return transform, quality, matches, inliers
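
The repeated-RANSAC averaging above is built on skimage.measure.ransac; a single-run sketch on synthetic points with a known shift (the points and values are made up for illustration):

import numpy as np
from skimage.measure import ransac
from skimage.transform import EuclideanTransform

rng = np.random.default_rng(0)
src = rng.uniform(0, 100, (50, 2))
dst = src + [5, -3]                              # known translation
model, inliers = ransac((src, dst), EuclideanTransform,
                        min_samples=10, residual_threshold=1, max_trials=100)
print(np.round(model.translation, 3), int(inliers.sum()))   # ~[ 5. -3.] 50
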
registration(fixed_data, moving_data, **kwargs)
Source code in src\registration_methods\RegistrationMethodSkFeatures.py
def registration(self, fixed_data: SpatialImage, moving_data: SpatialImage, **kwargs) -> dict:
    eye_transform = np.eye(self.ndims + 1)
    transform = eye_transform
    quality = 0
    matches = []
    inliers = []

    #print(self.count, fixed_data.name, moving_data.name)
    #self.count+=1
    #return {"affine_matrix": transform, "quality": 1}

    if np.isnan(fixed_data).all() or np.isnan(moving_data).all():
        logging.warning('No overlapping data')
        return {
            "affine_matrix": transform,  # homogenous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
            "quality": 0  # float between 0 and 1 (if not available, set to 1.0)
        }

    full_size_dist = np.linalg.norm(self.full_size)
    mean_size_dist = np.mean([np.linalg.norm(data.shape) for data in [fixed_data, moving_data]])
    scale = mean_size_dist / full_size_dist
    gaussian_sigma = self.full_size_gaussian_sigma * (scale ** (1/3))
    mean_size = np.mean([np.linalg.norm(data.shape) / np.sqrt(self.ndims) for data in [fixed_data, moving_data]])
    inlier_threshold = mean_size * self.inlier_threshold_factor

    fixed_points, fixed_desc, fixed_data2 = self.detect_features(fixed_data, gaussian_sigma)
    moving_points, moving_desc, moving_data2 = self.detect_features(moving_data, gaussian_sigma)

    if len(fixed_desc) > 0 and len(moving_desc) > 0:
        transform, quality, matches, inliers = self.match(fixed_points, fixed_desc, moving_points, moving_desc,
                                                          min_matches=self.min_matches, cross_check=self.cross_check,
                                                          lowe_ratio=self.lowe_ratio, inlier_threshold=inlier_threshold,
                                                          mean_size_dist=mean_size_dist)

        #landmark_initializer = sitk.LandmarkBasedTransformInitializerFilter()
        #landmark_initializer.SetFixedLandmarks(fixed_points2)
        #landmark_initializer.SetMovingLandmarks(moving_points2)
        #transform = sitk.Euler2DTransform()
        #output_transform = landmark_initializer.Execute(transform)
        #print(output_transform)

        transform = np.array(transform)

    if self.debug:
        print(f'#keypoints: {len(fixed_desc)},{len(moving_desc)}'
              f' #matches: {len(matches)} #inliers: {np.sum(inliers):.0f} quality: {quality:.3f}')

        #output_filename = 'matches_' + datetime.now().strftime('%Y%m%d_%H%M%S_%f')[:-3]
        #save_tiff(output_filename + '_f.tiff', fixed_data.astype(self.source_type))
        #save_tiff(output_filename + '_m.tiff', moving_data.astype(self.source_type))

        #if np.sum(inliers) > 0:
        #    draw_keypoints_matches_sk(fixed_data2, fixed_points,
        #                              moving_data2, moving_points,
        #                              matches[inliers],
        #                              show_plot=False, output_filename=output_filename + '_i.tiff')

        #draw_keypoints_matches(fixed_data2, fixed_points,
        #                       moving_data2, moving_points,
        #                       matches, inliers,
        #                       show_plot=False, output_filename=output_filename + '.tiff')

    if quality == 0 or np.sum(inliers) == 0:
        logging.error('Unable to find feature-based registration')
        transform = eye_transform

    if len(transform) < self.ndims + 1:
        transform3d = np.eye(self.ndims + 1)  # fresh matrix; assigning eye_transform would alias and mutate it
        transform3d[1:, 1:] = transform
        transform = transform3d

    return {
        "affine_matrix": transform,  # homogenous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
        "quality": quality  # float between 0 and 1 (if not available, set to 1.0)
    }
registration_physical_space(fixed_data, moving_data, *, fixed_origin, moving_origin, fixed_spacing, moving_spacing, initial_affine, transform_types=None, **ants_registration_kwargs)
Source code in src\registration_methods\RegistrationMethodSkFeatures.py
def registration_physical_space(
        self,
        fixed_data,
        moving_data,
        *,
        fixed_origin,
        moving_origin,
        fixed_spacing,
        moving_spacing,
        initial_affine,
        transform_types=None,
        **ants_registration_kwargs,
):
    return {
        "affine_matrix": np.eye(self.ndims + 1),
        # homogeneous matrix of shape (ndim + 1, ndim + 1), axis order (z, y, x)
        "quality": 1  # float between 0 and 1 (if not available, set to 1.0)
    }

apply_transform(points, transform)

Source code in src\util.py
def apply_transform(points, transform):
    new_points = []
    for point in points:
        point_len = len(point)
        while len(point) < len(transform):
            point = list(point) + [1]
        new_point = np.dot(point, np.transpose(np.array(transform)))
        new_points.append(new_point[:point_len])
    return new_points
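
Points shorter than the transform are padded with a homogeneous 1, multiplied, and truncated back, so 2D points can be applied against a 3x3 matrix directly. A usage sketch, assuming the function is imported from src.util:

import numpy as np

transform = np.array([[1, 0, 5],
                      [0, 1, -3],
                      [0, 0, 1]])
apply_transform([(10, 20)], transform)   # -> [array([15, 17])]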

blur_image(image, sigma)

Source code in src\image\util.py
def blur_image(image, sigma):
    nchannels = image.shape[2] if image.ndim == 3 else 1
    if nchannels not in [1, 3]:
        new_image = np.zeros_like(image)
        for channeli in range(nchannels):
            new_image[..., channeli] = blur_image_single(image[..., channeli], sigma)
    else:
        new_image = blur_image_single(image, sigma)
    return new_image

blur_image_single(image, sigma)

Source code in src\image\util.py
def blur_image_single(image, sigma):
    return gaussian_filter(image, sigma)

calc_foreground_map(sims)

Source code in src\image\util.py
def calc_foreground_map(sims):
    if len(sims) <= 2:
        return [True] * len(sims)
    sims = [sim.squeeze().astype(np.float32) for sim in sims]
    median_image = calc_images_median(sims).astype(np.float32)
    difs = [np.mean(np.abs(sim - median_image), (0, 1)) for sim in sims]
    # or use stddev instead of mean?
    threshold = np.mean(difs, 0)
    #threshold, _ = cv.threshold(np.array(difs).astype(np.uint16), 0, 1, cv.THRESH_OTSU)
    #threshold, foregrounds = filter_noise_images(channel_images)
    foreground_map = (difs > threshold)   # avoid shadowing the built-in 'map'
    if not np.any(foreground_map):
        return [True] * len(sims)
    return foreground_map

calc_images_median(images)

Source code in src\image\util.py
def calc_images_median(images):
    out_image = np.zeros(shape=images[0].shape, dtype=images[0].dtype)
    median_image = np.median(images, 0, out_image)
    return median_image

calc_images_quantiles(images, quantiles)

Source code in src\image\util.py
def calc_images_quantiles(images, quantiles):
    quantile_images = [image.astype(np.float32) for image in np.quantile(images, quantiles, 0)]
    return quantile_images

calc_output_properties(sims, transform_key, z_scale=None)

Source code in src\image\util.py
def calc_output_properties(sims, transform_key, z_scale=None):
    output_spacing = si_utils.get_spacing_from_sim(sims[0])
    if z_scale is not None:
        output_spacing['z'] = z_scale
    output_properties = fusion.calc_fusion_stack_properties(
        sims,
        [si_utils.get_affine_from_sim(sim, transform_key) for sim in sims],
        output_spacing,
        mode='union',
    )
    return output_properties

calc_pyramid(xyzct, npyramid_add=0, pyramid_downsample=2, volumetric_resize=False)

Source code in src\image\util.py
def calc_pyramid(xyzct: tuple, npyramid_add: int = 0, pyramid_downsample: float = 2,
                 volumetric_resize: bool = False) -> list:
    x, y, z, c, t = xyzct
    if volumetric_resize and z > 1:
        size = (x, y, z)
    else:
        size = (x, y)
    sizes_add = []
    scale = 1
    for _ in range(npyramid_add):
        scale /= pyramid_downsample
        scaled_size = np.maximum(np.round(np.multiply(size, scale)).astype(int), 1)
        sizes_add.append(scaled_size)
    return sizes_add
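
A usage sketch, assuming the function is imported from src.image.util: for a 4096x4096 XY plane, three extra levels at downsample factor 2 give 2048, 1024 and 512 pixel planes:

sizes_add = calc_pyramid((4096, 4096, 1, 3, 1), npyramid_add=3, pyramid_downsample=2)
# -> [array([2048, 2048]), array([1024, 1024]), array([512, 512])]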

check_round_significants(a, significant_digits)

Source code in src\util.py
def check_round_significants(a: float, significant_digits: int) -> float:
    rounded = round_significants(a, significant_digits)
    if a != 0:
        dif = 1 - rounded / a
    else:
        dif = rounded - a
    if abs(dif) < 10 ** -significant_digits:
        return rounded
    return a

color_image(image)

Source code in src\image\util.py
def color_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 1:
        return cv.cvtColor(np.array(image), cv.COLOR_GRAY2RGB)
    else:
        return image

combine_transforms(transforms)

Source code in src\image\util.py
def combine_transforms(transforms):
    combined_transform = None
    for transform in transforms:
        if combined_transform is None:
            combined_transform = transform
        else:
            combined_transform = np.dot(transform, combined_transform)
    return combined_transform

convert_image_sign_type(image, target_dtype)

Source code in src\image\util.py
def convert_image_sign_type(image: np.ndarray, target_dtype: np.dtype) -> np.ndarray:
    source_dtype = image.dtype
    if source_dtype.kind == target_dtype.kind:
        new_image = image
    elif source_dtype.kind == 'i':
        new_image = ensure_unsigned_image(image)
    else:
        # conversion without overhead
        offset = 2 ** (8 * target_dtype.itemsize - 1)
        new_image = (image - offset).astype(target_dtype)
    return new_image

convert_rational_value(value)

Source code in src\util.py
def convert_rational_value(value) -> float:
    if value is not None and isinstance(value, tuple):
        if value[0] == value[1]:
            value = value[0]
        else:
            value = value[0] / value[1]
    return value

convert_to_um(value, unit)

Source code in src\util.py
def convert_to_um(value, unit):
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    return value * conversions.get(unit, 1)
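
A couple of spot checks, assuming the function is imported from src.util (unknown units fall through with factor 1):

convert_to_um(1.5, 'mm')    # -> 1500.0
convert_to_um(250, 'nm')    # -> 0.25
convert_to_um(7, 'pixels')  # -> 7 (unknown unit, factor 1)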

create_compression_filter(compression)

Source code in src\image\util.py
def create_compression_filter(compression: list) -> tuple:
    compressor, compression_filters = None, None
    compression = ensure_list(compression)
    if compression is not None and len(compression) > 0:
        compression_type = compression[0].lower()
        if len(compression) > 1:
            level = int(compression[1])
        else:
            level = None
        if 'lzw' in compression_type:
            from imagecodecs.numcodecs import Lzw
            compression_filters = [Lzw()]
        elif '2k' in compression_type or '2000' in compression_type:
            from imagecodecs.numcodecs import Jpeg2k
            compression_filters = [Jpeg2k(level=level)]
        elif 'jpegls' in compression_type:
            from imagecodecs.numcodecs import Jpegls
            compression_filters = [Jpegls(level=level)]
        elif 'jpegxr' in compression_type:
            from imagecodecs.numcodecs import Jpegxr
            compression_filters = [Jpegxr(level=level)]
        elif 'jpegxl' in compression_type:
            from imagecodecs.numcodecs import Jpegxl
            compression_filters = [Jpegxl(level=level)]
        else:
            compressor = compression
    return compressor, compression_filters

create_transform(center, angle, matrix_size=3)

Source code in src\util.py
def create_transform(center, angle, matrix_size=3):
    if isinstance(center, dict):
        center = dict_to_xyz(center)
    if len(center) == 2:
        center = np.array(list(center) + [0])
    if angle is None:
        angle = 0
    r = Rotation.from_euler('z', angle, degrees=True)
    t = center - r.apply(center, inverse=True)
    transform = np.eye(matrix_size)
    transform[:3, :3] = np.transpose(r.as_matrix())
    transform[:3, -1] += t
    return transform

create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0))

Source code in src\util.py
def create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0)):
    transform = cv.getRotationMatrix2D(center[:2], angle, scale)
    transform[:, 2] += translate
    if len(transform) == 2:
        transform = np.vstack([transform, [0, 0, 1]])   # create 3x3 matrix
    return transform

desc_to_dict(desc)

Source code in src\util.py
def desc_to_dict(desc: str) -> dict:
    desc_dict = {}
    if desc.startswith('{'):
        try:
            metadata = ast.literal_eval(desc)
            return metadata
        except:
            pass
    for item in re.split(r'[\r\n\t|]', desc):
        item_sep = '='
        if ':' in item:
            item_sep = ':'
        if item_sep in item:
            items = item.split(item_sep)
            key = items[0].strip()
            value = items[1].strip()
            for dtype in (int, float, bool):
                try:
                    value = dtype(value)
                    break
                except:
                    pass
            desc_dict[key] = value
    return desc_dict
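
Items are split on newlines, tabs or '|', ':' is preferred over '=' as the key separator, and values are coerced to int, float or bool where possible. A usage sketch, assuming the function is imported from src.util:

desc_to_dict('exposure=0.25|gain:2')   # -> {'exposure': 0.25, 'gain': 2}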

detect_area_points(image)

Source code in src\image\util.py
def detect_area_points(image):
    method = cv.THRESH_OTSU
    threshold = -5
    contours = []
    while len(contours) <= 1 and threshold <= 255:
        _, binimage = cv.threshold(np.array(uint8_image(image)), threshold, 255, method)
        contours0 = cv.findContours(binimage, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
        contours = contours0[0] if len(contours0) == 2 else contours0[1]
        method = cv.THRESH_BINARY
        threshold += 5
    area_contours = [(contour, cv.contourArea(contour)) for contour in contours]
    area_contours.sort(key=lambda contour_area: contour_area[1], reverse=True)
    min_area = max(np.mean([area for contour, area in area_contours]), 1)
    area_points = [(get_center(contour), area) for contour, area in area_contours if area > min_area]

    #image = cv.cvtColor(image, cv.COLOR_GRAY2BGR)
    #for point in area_points:
    #    radius = int(np.round(np.sqrt(point[1]/np.pi)))
    #    cv.circle(image, tuple(np.round(point[0]).astype(int)), radius, (255, 0, 0), -1)
    #show_image(image)
    return area_points

dict_to_xyz(dct, keys='xyz')

Source code in src\util.py
def dict_to_xyz(dct, keys='xyz'):
    return [dct[key] for key in keys if key in dct]

dir_regex(pattern)

Source code in src\util.py
def dir_regex(pattern):
    files = []
    for pattern_item in ensure_list(pattern):
        files.extend(glob.glob(pattern_item, recursive=True))
    files_sorted = sorted(files, key=lambda file: find_all_numbers(get_filetitle(file)))
    return files_sorted

draw_edge_filter(bounds)

Source code in src\util.py
def draw_edge_filter(bounds):
    out_image = np.zeros(np.flip(bounds))
    y, x = np.where(out_image == 0)
    points = np.transpose([x, y])

    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) * 10, 0, 1)
    return position_weights.reshape(np.flip(bounds))

draw_keypoints(image, points, color=(255, 0, 0))

Source code in src\image\util.py
def draw_keypoints(image, points, color=(255, 0, 0)):
    out_image = color_image(float2int_image(image))
    for point in points:
        point = np.round(point).astype(int)
        cv.drawMarker(out_image, tuple(point), color=color, markerType=cv.MARKER_CROSS, markerSize=5, thickness=1)
    return out_image

draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None, points_color='black', match_color='red', inlier_color='lime', show_plot=True, output_filename=None)

Source code in src\image\util.py
def draw_keypoints_matches(image1, points1, image2, points2, matches=None, inliers=None,
                           points_color='black', match_color='red', inlier_color='lime',
                           show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape = np.max([image.shape for image in [image1, image2]], axis=0)
    shape_y, shape_x = shape[:2]
    if shape_x > 2 * shape_y:
        merge_axis = 0
        offset2 = [shape_y, 0]
    else:
        merge_axis = 1
        offset2 = [0, shape_x]
    image = np.concatenate([
        np.pad(image1, ((0, shape[0] - image1.shape[0]), (0, shape[1] - image1.shape[1]))),
        np.pad(image2, ((0, shape[0] - image2.shape[0]), (0, shape[1] - image2.shape[1])))
    ], axis=merge_axis)
    ax.imshow(image, cmap='gray')

    ax.scatter(
        points1[:, 1],
        points1[:, 0],
        facecolors='none',
        edgecolors=points_color,
    )
    ax.scatter(
        points2[:, 1] + offset2[1],
        points2[:, 0] + offset2[0],
        facecolors='none',
        edgecolors=points_color,
    )

    for i, match in enumerate(matches):
        color = match_color
        if i < len(inliers) and inliers[i]:
            color = inlier_color
        index1, index2 = match
        ax.plot(
            (points1[index1, 1], points2[index2, 1] + offset2[1]),
            (points1[index1, 0], points2[index2, 0] + offset2[0]),
            '-', linewidth=1, alpha=0.5, color=color,
        )

    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

    return fig, ax

draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None, color=(255, 0, 0), inlier_color=(0, 255, 0), radius=15, thickness=2)

Source code in src\image\util.py
def draw_keypoints_matches_cv(image1, points1, image2, points2, matches=None, inliers=None,
                              color=(255, 0, 0), inlier_color=(0, 255, 0), radius = 15, thickness = 2):
    # based on https://gist.github.com/woolpeeker/d7e1821e1b5c556b32aafe10b7a1b7e8
    image1 = uint8_image(image1)
    image2 = uint8_image(image2)
    # We're drawing them side by side.  Get dimensions accordingly.
    new_shape = (max(image1.shape[0], image2.shape[0]), image1.shape[1] + image2.shape[1], 3)
    out_image = np.zeros(new_shape, image1.dtype)
    # Place images onto the new image.
    out_image[0:image1.shape[0], 0:image1.shape[1]] = color_image(image1)
    out_image[0:image2.shape[0], image1.shape[1]:image1.shape[1] + image2.shape[1]] = color_image(image2)

    if matches is not None:
        # Draw lines between matches.  Make sure to offset kp coords in second image appropriately.
        for index, match in enumerate(matches):
            if inliers is not None and inliers[index]:
                line_color = inlier_color
            else:
                line_color = color
            # So the keypoint locs are stored as a tuple of floats.  cv2.line() wants locs as a tuple of ints.
            end1 = tuple(np.round(points1[match[0]]).astype(int))
            end2 = tuple(np.round(points2[match[1]]).astype(int) + np.array([image1.shape[1], 0]))
            cv.line(out_image, end1, end2, line_color, thickness)
            cv.circle(out_image, end1, radius, line_color, thickness)
            cv.circle(out_image, end2, radius, line_color, thickness)
    else:
        # Draw all points if no matches are provided.
        for point in points1:
            point = tuple(np.round(point).astype(int))
            cv.circle(out_image, point, radius, color, thickness)
        for point in points2:
            point = tuple(np.round(point).astype(int) + np.array([image1.shape[1], 0]))
            cv.circle(out_image, point, radius, color, thickness)
    return out_image

draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None, show_plot=True, output_filename=None)

Source code in src\image\util.py
def draw_keypoints_matches_sk(image1, points1, image2, points2, matches=None,
                              show_plot=True, output_filename=None):
    fig, ax = plt.subplots(figsize=(16, 8))
    shape_y, shape_x = image1.shape[:2]
    if shape_x > 2 * shape_y:
        alignment = 'vertical'
    else:
        alignment = 'horizontal'
    plot_matched_features(
        image1,
        image2,
        keypoints0=points1,
        keypoints1=points2,
        matches=matches,
        ax=ax,
        alignment=alignment,
        only_matches=True,
    )
    plt.tight_layout()
    if output_filename is not None:
        plt.savefig(output_filename)
    if show_plot:
        plt.show()

ensure_list(x)

Source code in src\util.py
def ensure_list(x) -> list:
    if x is None:
        return []
    elif isinstance(x, list):
        return x
    else:
        return [x]

ensure_unsigned_image(image)

Source code in src\image\util.py
def ensure_unsigned_image(image: np.ndarray) -> np.ndarray:
    source_dtype = image.dtype
    dtype = ensure_unsigned_type(source_dtype)
    if dtype != source_dtype:
        # conversion without overhead
        offset = 2 ** (8 * dtype.itemsize - 1)
        new_image = image.astype(dtype) + offset
    else:
        new_image = image
    return new_image

ensure_unsigned_type(dtype)

Source code in src\image\util.py
def ensure_unsigned_type(dtype: np.dtype) -> np.dtype:
    new_dtype = dtype
    if dtype.kind == 'i' or dtype.byteorder == '>' or dtype.byteorder == '<':
        new_dtype = np.dtype(f'u{dtype.itemsize}')
    return new_dtype

eval_context(data, key, default_value, context)

Source code in src\util.py
def eval_context(data, key, default_value, context):
    value = data.get(key, default_value)
    if isinstance(value, str):
        try:
            value = value.format_map(context)
        except:
            pass
        try:
            value = eval(value, context)
        except:
            pass
    return value

export_csv(filename, data, header=None)

Source code in src\util.py
def export_csv(filename, data, header=None):
    with open(filename, 'w', encoding='utf8', newline='') as file:
        csvwriter = csv.writer(file)
        if header is not None:
            csvwriter.writerow(header)
        for row in data:
            csvwriter.writerow(row)

export_json(filename, data)

Source code in src\util.py
def export_json(filename, data):
    with open(filename, 'w', encoding='utf8') as file:
        json.dump(data, file, indent=4)

filter_dict(dict0)

Source code in src\util.py
def filter_dict(dict0: dict) -> dict:
    new_dict = {}
    for key, value0 in dict0.items():
        if value0 is not None:
            values = []
            for value in ensure_list(value0):
                if isinstance(value, dict):
                    value = filter_dict(value)
                values.append(value)
            if len(values) == 1:
                values = values[0]
            new_dict[key] = values
    return new_dict

filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5)

Source code in src\util.py
def filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5):
    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) / filter_factor, 0, 1)
    order_weights = 1 - np.array(range(len(points))) / len(points) / 2
    weights = position_weights * order_weights
    return weights > threshold

filter_noise_images(images)

Source code in src\image\util.py
def filter_noise_images(images):
    dtype = images[0].dtype
    maxval = 2 ** (8 * dtype.itemsize) - 1
    image_vars = [np.asarray(np.std(image)).item() for image in images]
    threshold, mask0 = cv.threshold(np.array(image_vars).astype(dtype), 0, maxval, cv.THRESH_OTSU)
    mask = [flag.item() for flag in mask0.astype(bool)]
    return int(threshold), mask

find_all_numbers(text)

Source code in src\util.py
def find_all_numbers(text: str) -> list:
    return list(map(int, re.findall(r'\d+', text)))

float2int_image(image, target_dtype=np.dtype(np.uint8))

Source code in src\image\util.py
def float2int_image(image, target_dtype=np.dtype(np.uint8)):
    source_dtype = image.dtype
    if source_dtype.kind not in ('i', 'u') and target_dtype.kind != 'f':
        maxval = 2 ** (8 * target_dtype.itemsize) - 1
        return (image * maxval).astype(target_dtype)
    else:
        return image

get_center(data, offset=(0, 0))

Source code in src\util.py
def get_center(data, offset=(0, 0)):
    moments = get_moments(data, offset=offset)
    if moments['m00'] != 0:
        center = get_moments_center(moments)
    else:
        center = np.mean(data, 0).flatten()  # close approximation
    return center.astype(np.float32)

get_center_from_transform(transform)

Source code in src\util.py
def get_center_from_transform(transform):
    # from opencv:
    # t0 = (1-alpha) * cx - beta * cy
    # t1 = beta * cx + (1-alpha) * cy
    # where
    # alpha = cos(angle) * scale
    # beta = sin(angle) * scale
    # isolate cx and cy:
    t0, t1 = transform[:2, 2]
    scale = 1
    angle = np.arctan2(transform[0][1], transform[0][0])
    alpha = np.cos(angle) * scale
    beta = np.sin(angle) * scale
    cx = (t1 + t0 * (1 - alpha) / beta) / (beta + (1 - alpha) ** 2 / beta)
    cy = ((1 - alpha) * cx - t0) / beta
    return cx, cy
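
Because this inverts the OpenCV rotation-matrix formulas quoted in the comments, the centre used by create_transform0 can be recovered exactly whenever the angle is non-zero (at angle 0, beta = 0 and the division fails). A round-trip sketch, assuming both helpers are imported from src.util:

import numpy as np

t = create_transform0(center=(40, 30), angle=25)
np.round(get_center_from_transform(t), 6)   # -> array([40., 30.])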

get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None)

Source code in src\image\util.py
def get_data_mapping(data, transform_key=None, transform=None, translation0=None, rotation=None):
    if rotation is None:
        rotation = 0

    if isinstance(data, DataTree):
        sim = msi_utils.get_sim_from_msim(data)
    else:
        sim = data
    sdims = ''.join(si_utils.get_spatial_dims_from_sim(sim))
    sdims = sdims.replace('zyx', 'xyz').replace('yx', 'xy')   # order xy(z)
    origin = si_utils.get_origin_from_sim(sim)
    translation = [origin[sdim] for sdim in sdims]

    if len(translation) == 0:
        translation = [0, 0]
    if len(translation) == 2:
        if translation0 is not None and len(translation0) == 3:
            z = translation0[2]
        else:
            z = 0
        translation = list(translation) + [z]

    if transform is not None:
        translation1, rotation1, _ = get_properties_from_transform(transform, invert=True)
        translation = np.array(translation) + translation1
        rotation += rotation1

    if transform_key is not None:
        transform1 = sim.transforms[transform_key]
        translation1, rotation1, _ = get_properties_from_transform(transform1, invert=True)
        rotation += rotation1

    return translation, rotation

get_default(x, default)

Source code in src\util.py
def get_default(x, default):
    return default if x is None else x

get_filetitle(filename)

Source code in src\util.py
def get_filetitle(filename: str) -> str:
    filebase = os.path.basename(filename)
    title = os.path.splitext(filebase)[0].removesuffix('.ome')  # rstrip('.ome') would strip any trailing '.', 'o', 'm', 'e' characters
    return title

get_image_quantile(image, quantile, axis=None)

Source code in src\image\util.py
def get_image_quantile(image: np.ndarray, quantile: float, axis=None) -> float:
    value = np.quantile(image, quantile, axis=axis).astype(image.dtype)
    return value

get_image_size_info(sizes_xyzct, pixel_nbytes, pixel_type, channels)

Source code in src\image\util.py
def get_image_size_info(sizes_xyzct: list, pixel_nbytes: int, pixel_type: np.dtype, channels: list) -> str:
    image_size_info = 'XYZCT:'
    size = 0
    for i, size_xyzct in enumerate(sizes_xyzct):
        w, h, zs, cs, ts = size_xyzct
        size += np.int64(pixel_nbytes) * w * h * zs * cs * ts
        if i > 0:
            image_size_info += ','
        image_size_info += f' {w} {h} {zs} {cs} {ts}'
    image_size_info += f' Pixel type: {pixel_type} Uncompressed: {print_hbytes(size)}'
    if sizes_xyzct[0][3] == 3:
        channel_info = 'rgb'
    else:
        channel_info = ','.join([channel.get('Name', '') for channel in channels])
    if channel_info != '':
        image_size_info += f' Channels: {channel_info}'
    return image_size_info

get_image_window(image, low=0.01, high=0.99)

Source code in src\image\util.py
def get_image_window(image, low=0.01, high=0.99):
    window = (
        get_image_quantile(image, low),
        get_image_quantile(image, high)
    )
    return window

get_max_downsamples(shape, npyramid_add, pyramid_downsample)

Source code in src\image\util.py
def get_max_downsamples(shape, npyramid_add, pyramid_downsample):
    shape = list(shape)
    for i in range(npyramid_add):
        shape[-1] //= pyramid_downsample
        shape[-2] //= pyramid_downsample
        if shape[-1] < 1 or shape[-2] < 1:
            return i
    return npyramid_add

get_mean_nn_distance(points1, points2)

Source code in src\util.py
def get_mean_nn_distance(points1, points2):
    return np.mean([get_nn_distance(points1), get_nn_distance(points2)])

get_moments(data, offset=(0, 0))

Source code in src\util.py
def get_moments(data, offset=(0, 0)):
    moments = cv.moments((np.array(data) + offset).astype(np.float32))    # doesn't work for float64!
    return moments

get_moments_center(moments, offset=(0, 0))

Source code in src\util.py
def get_moments_center(moments, offset=(0, 0)):
    return np.array([moments['m10'], moments['m01']]) / moments['m00'] + np.array(offset)

get_nn_distance(points0)

Source code in src\util.py
def get_nn_distance(points0):
    points = list(set(map(tuple, points0)))     # get unique points
    if len(points) >= 2:
        tree = KDTree(points, leaf_size=2)
        dist, ind = tree.query(points, k=2)
        nn_distance = np.median(dist[:, 1])
    else:
        nn_distance = 1
    return nn_distance

get_numpy_slicing(dimension_order, **slicing)

Source code in src\image\util.py
def get_numpy_slicing(dimension_order, **slicing):
    slices = []
    for axis in dimension_order:
        index = slicing.get(axis)
        index0 = slicing.get(axis + '0')
        index1 = slicing.get(axis + '1')
        if index0 is not None and index1 is not None:
            slice1 = slice(int(index0), int(index1))
        elif index is not None:
            slice1 = int(index)
        else:
            slice1 = slice(None)
        slices.append(slice1)
    return tuple(slices)
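
Per axis, the pair axis0/axis1 produces a range, a bare axis value a single index, and anything else a full slice. A usage sketch, assuming the function is imported from src.image.util:

get_numpy_slicing('tczyx', z=3, y0=0, y1=512, x0=0, x1=512)
# -> full slices for t and c, index 3 for z, slice(0, 512) for y and x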

get_orthogonal_pairs(origins, image_size_um)

Get pairs of orthogonal neighbors from a list of tiles. Tiles don't have to be placed on a regular grid.

Source code in src\util.py
def get_orthogonal_pairs(origins, image_size_um):
    """
    Get pairs of orthogonal neighbors from a list of tiles.
    Tiles don't have to be placed on a regular grid.
    """
    pairs = []
    angles = []
    z_positions = [pos[0] for pos in origins if len(pos) == 3]
    ordered_z = sorted(set(z_positions))
    is_mixed_3dstack = len(ordered_z) < len(z_positions)
    for i, j in np.transpose(np.triu_indices(len(origins), 1)):
        origini = np.array(origins[i])
        originj = np.array(origins[j])
        if is_mixed_3dstack:
            # ignore z value for distance
            distance = math.dist(origini[-2:], originj[-2:])
            min_distance = max(image_size_um[-2:])
            z_i, z_j = origini[0], originj[0]
            is_same_z = (z_i == z_j)
            is_close_z = abs(ordered_z.index(z_i) - ordered_z.index(z_j)) <= 1
            if not is_same_z:
                # for tiles in different z stack, require greater overlap
                min_distance *= 0.8
            ok = (distance < min_distance and is_close_z)
        else:
            distance = math.dist(origini, originj)
            min_distance = max(image_size_um)
            ok = (distance < min_distance)
        if ok:
            pairs.append((i, j))
            vector = origini - originj
            angle = math.degrees(math.atan2(vector[1], vector[0]))
            if distance < min(image_size_um):
                angle += 90
            while angle < -90:
                angle += 180
            while angle > 90:
                angle -= 180
            angles.append(angle)
    return pairs, angles
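
A usage sketch on a 2x2 grid with 10% overlap, assuming the function is imported from src.util (origins and tile size in micrometres; the diagonal neighbours are farther apart than max(image_size_um) and are skipped):

origins = [(0, 0), (0, 90), (90, 0), (90, 90)]
pairs, angles = get_orthogonal_pairs(origins, image_size_um=(100, 100))
# pairs -> [(0, 1), (0, 2), (1, 3), (2, 3)]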

get_properties_from_transform(transform, invert=False)

Source code in src\image\util.py
def get_properties_from_transform(transform, invert=False):
    if len(transform.shape) == 3:
        transform = transform[0]
    if invert:
        transform = param_utils.invert_coordinate_order(transform)
    transform = np.array(transform)
    translation = param_utils.translation_from_affine(transform)
    if len(translation) == 2:
        translation = list(translation) + [0]
    rotation = get_rotation_from_transform(transform)
    scale = get_scale_from_transform(transform)
    return translation, rotation, scale

get_rotation_from_transform(transform)

Source code in src\util.py
def get_rotation_from_transform(transform):
    rotation = np.rad2deg(np.arctan2(transform[0][1], transform[0][0]))
    return rotation

get_scale_from_transform(transform)

Source code in src\util.py
def get_scale_from_transform(transform):
    scale = np.mean(np.linalg.norm(transform, axis=0)[:-1])
    return scale

get_sim_physical_size(sim, invert=False)

Source code in src\image\util.py
def get_sim_physical_size(sim, invert=False):
    size = si_utils.get_shape_from_sim(sim, asarray=True) * si_utils.get_spacing_from_sim(sim, asarray=True)
    if invert:
        size = np.flip(size)
    return size

get_sim_position_final(sim)

Source code in src\image\util.py
def get_sim_position_final(sim):
    transform_keys = si_utils.get_tranform_keys_from_sim(sim)
    transform = combine_transforms([np.array(si_utils.get_affine_from_sim(sim, transform_key))
                                    for transform_key in transform_keys])
    position = apply_transform([si_utils.get_origin_from_sim(sim, asarray=True)], transform)[0]
    return position

get_sim_shape_2d(sim, transform_key=None)

Source code in src\image\util.py
def get_sim_shape_2d(sim, transform_key=None):
    if 't' in sim.coords.xindexes:
        # work-around for points error in get_overlap_bboxes()
        sim1 = si_utils.sim_sel_coords(sim, {'t': 0})
    else:
        sim1 = sim
    stack_props = si_utils.get_stack_properties_from_sim(sim1, transform_key=transform_key)
    vertices = mv_graph.get_vertices_from_stack_props(stack_props)
    if vertices.shape[1] == 3:
        # remove z coordinate
        vertices = vertices[:, 1:]
    if len(vertices) >= 8:
        # remove redundant x/y vertices
        vertices = vertices[:4]
    if len(vertices) >= 4:
        # last 2 vertices appear to be swapped
        vertices[2:] = np.array(list(reversed(vertices[2:])))
    return vertices

get_translation_from_transform(transform)

Source code in src\util.py
def get_translation_from_transform(transform):
    ndim = len(transform) - 1
    #translation = transform[:ndim, ndim]
    translation = apply_transform([[0] * ndim], transform)[0]
    return translation

get_unique_file_labels(filenames)

Source code in src\util.py
def get_unique_file_labels(filenames: list) -> list:
    file_labels = []
    file_parts = []
    label_indices = set()
    last_parts = None
    for filename in filenames:
        parts = split_numeric(filename)
        if len(parts) == 0:
            # no numeric parts found: fall back to the raw filename
            parts = filename
        file_parts.append(parts)
        if last_parts is not None:
            for parti, (part1, part2) in enumerate(zip(last_parts, parts)):
                if part1 != part2:
                    label_indices.add(parti)
        last_parts = parts
    label_indices = sorted(list(label_indices))

    for file_part in file_parts:
        file_label = '_'.join([file_part[i] for i in label_indices])
        file_labels.append(file_label)

    if len(set(file_labels)) < len(file_labels):
        # fallback for duplicate labels
        file_labels = [get_filetitle(filename) for filename in filenames]

    return file_labels

get_value_units_micrometer(value_units0)

Source code in src\util.py
def get_value_units_micrometer(value_units0: list|dict) -> list|dict|None:
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    if value_units0 is None:
        return None

    if isinstance(value_units0, dict):
        values_um = {}
        for dim, value_unit in value_units0.items():
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um[dim] = value_um
    else:
        values_um = []
        for value_unit in value_units0:
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um.append(value_um)
    return values_um

grayscale_image(image)

Source code in src\image\util.py
def grayscale_image(image):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if nchannels == 4:
        return cv.cvtColor(image, cv.COLOR_RGBA2GRAY)
    elif nchannels > 1:
        return cv.cvtColor(image, cv.COLOR_RGB2GRAY)
    else:
        return image

group_sims_by_z(sims)

Source code in src\image\util.py
def group_sims_by_z(sims):
    grouped_sims = []
    z_positions = [si_utils.get_origin_from_sim(sim).get('z') for sim in sims]
    is_mixed_3dstack = len(set(z_positions)) < len(z_positions)
    if is_mixed_3dstack:
        sims_by_z = {}
        for simi, z_pos in enumerate(z_positions):
            if z_pos not in sims_by_z:
                sims_by_z[z_pos] = []   # sims without a z origin share a single None group
            sims_by_z[z_pos].append(simi)
        grouped_sims = list(sims_by_z.values())
    if len(grouped_sims) == 0:
        grouped_sims = [list(range(len(sims)))]
    return grouped_sims

image_reshape(image, target_size)

Source code in src\image\util.py
def image_reshape(image: np.ndarray, target_size: tuple) -> np.ndarray:
    tw, th = target_size
    sh, sw = image.shape[0:2]
    if sw < tw or sh < th:
        dw = max(tw - sw, 0)
        dh = max(th - sh, 0)
        padding = [(dh // 2, dh - dh // 2), (dw // 2, dw - dw // 2)]
        if len(image.shape) == 3:
            padding += [(0, 0)]
        image = np.pad(image, padding, mode='constant', constant_values=(0, 0))
    if tw < sw or th < sh:
        image = image[0:th, 0:tw]
    return image

image_resize(image, target_size0, dimension_order='yxc')

Source code in src\image\util.py
def image_resize(image: np.ndarray, target_size0: tuple, dimension_order: str = 'yxc') -> np.ndarray:
    shape = image.shape
    x_index = dimension_order.index('x')
    y_index = dimension_order.index('y')
    c_is_at_end = ('c' in dimension_order and dimension_order.endswith('c'))
    size = shape[x_index], shape[y_index]
    if np.mean(np.divide(size, target_size0)) < 1:
        interpolation = cv.INTER_CUBIC
    else:
        interpolation = cv.INTER_AREA
    dtype0 = image.dtype
    image = ensure_unsigned_image(image)
    target_size = tuple(np.maximum(np.round(target_size0).astype(int), 1))
    if dimension_order in ['yxc', 'yx']:
        new_image = cv.resize(np.asarray(image), target_size, interpolation=interpolation)
    elif dimension_order == 'cyx':
        new_image = np.moveaxis(image, 0, -1)
        new_image = cv.resize(np.asarray(new_image), target_size, interpolation=interpolation)
        new_image = np.moveaxis(new_image, -1, 0)
    else:
        ts = image.shape[dimension_order.index('t')] if 't' in dimension_order else 1
        zs = image.shape[dimension_order.index('z')] if 'z' in dimension_order else 1
        target_shape = list(image.shape).copy()
        target_shape[x_index] = target_size[0]
        target_shape[y_index] = target_size[1]
        new_image = np.zeros(target_shape, dtype=image.dtype)
        for t in range(ts):
            for z in range(zs):
                slices = get_numpy_slicing(dimension_order, z=z, t=t)
                image1 = image[slices]
                if not c_is_at_end:
                    image1 = np.moveaxis(image1, 0, -1)
                new_image1 = np.atleast_3d(cv.resize(np.asarray(image1), target_size, interpolation=interpolation))
                if not c_is_at_end:
                    new_image1 = np.moveaxis(new_image1, -1, 0)
                new_image[slices] = new_image1
    new_image = convert_image_sign_type(new_image, dtype0)
    return new_image

import_csv(filename)

Source code in src\util.py
def import_csv(filename):
    with open(filename, encoding='utf8') as file:
        data = list(csv.reader(file))   # materialise the rows: csv.reader is lazy and the file closes on exit
    return data

import_json(filename)

Source code in src\util.py
def import_json(filename):
    with open(filename, encoding='utf8') as file:
        data = json.load(file)
    return data

import_metadata(content, fields=None, input_path=None)

Source code in src\util.py
def import_metadata(content, fields=None, input_path=None):
    # return dict[id] = {values}
    if isinstance(content, str):
        ext = os.path.splitext(content)[1].lower()
        if input_path:
            if isinstance(input_path, list):
                input_path = input_path[0]
            content = os.path.normpath(os.path.join(os.path.dirname(input_path), content))
        if ext == '.csv':
            content = import_csv(content)
        elif ext in ['.json', '.ome.json']:
            content = import_json(content)
    if fields is not None:
        content = [[data[field] for field in fields] for data in content]
    return content

int2float_image(image)

Source code in src\image\util.py
def int2float_image(image):
    source_dtype = image.dtype
    if not source_dtype.kind == 'f':
        maxval = 2 ** (8 * source_dtype.itemsize) - 1
        return image / np.float32(maxval)
    else:
        return image

norm_image_quantiles(image0, quantile=0.99)

Source code in src\image\util.py
def norm_image_quantiles(image0, quantile=0.99):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    min_value = np.quantile(image, 1 - quantile)
    max_value = np.quantile(image, quantile)
    normimage = (image - min_value) / (max_value - min_value)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

norm_image_variance(image0)

Source code in src\image\util.py
def norm_image_variance(image0):
    if len(image0.shape) == 3 and image0.shape[2] == 4:
        image, alpha = image0[..., :3], image0[..., 3]
    else:
        image, alpha = image0, None
    normimage = (image - np.mean(image)) / np.std(image)
    normimage = normimage.clip(0, 1).astype(np.float32)
    if alpha is not None:
        normimage = np.dstack([normimage, alpha])
    return normimage

normalise(sims, transform_key, use_global=True)

Source code in src\image\util.py
def normalise(sims, transform_key, use_global=True):
    new_sims = []
    dtype = sims[0].dtype
    # global mean and stddev
    if use_global:
        offsets = []
        value_ranges = []
        for sim in sims:
            offset = np.mean(sim, dtype=np.float32)
            value_range = np.std(sim, dtype=np.float32)
            #offset, maxval = get_image_window(sim, low=0.01, high=0.99)
            #value_range = maxval - offset
            offsets.append(offset)
            value_ranges.append(value_range)
        offset = np.mean(offsets)
        value_range = np.mean(value_ranges)
    else:
        offset = 0
        value_range = 1
    # normalise all images
    for sim in sims:
        if not use_global:
            offset = np.mean(sim, dtype=np.float32)
            value_range = np.std(sim, dtype=np.float32)
        image = (sim - offset) / value_range
        image = float2int_image(image.clip(0, 1), dtype)    # np.clip(image) is not dask-compatible, use image.clip() instead
        new_sim = si_utils.get_sim_from_array(
            image,
            dims=sim.dims,
            scale=si_utils.get_spacing_from_sim(sim),
            translation=si_utils.get_origin_from_sim(sim),
            transform_key=transform_key,
            affine=si_utils.get_affine_from_sim(sim, transform_key),
            c_coords=sim.c.data,
            t_coords=sim.t.data
        )
        new_sims.append(new_sim)
    return new_sims

normalise_rotated_positions(positions0, rotations0, size, center)

Source code in src\util.py
def normalise_rotated_positions(positions0, rotations0, size, center):
    # in [xy(z)]
    positions = []
    rotations = []
    _, angles = get_orthogonal_pairs(positions0, size)
    for position0, rotation in zip(positions0, rotations0):
        if rotation is None and len(angles) > 0:
            rotation = -np.mean(angles)
        angle = -rotation if rotation is not None else None
        transform = create_transform(center=center, angle=angle, matrix_size=4)
        position = apply_transform([position0], transform)[0]
        positions.append(position)
        rotations.append(rotation)
    return positions, rotations

normalise_rotation(rotation)

Normalise rotation to be in the range [-180, 180].

Source code in src\util.py
def normalise_rotation(rotation):
    """
    Normalise rotation to be in the range [-180, 180].
    """
    while rotation < -180:
        rotation += 360
    while rotation > 180:
        rotation -= 360
    return rotation
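
A minimal illustration of the wrap-around behaviour (values chosen for illustration):

normalise_rotation(270)     # -> -90
normalise_rotation(-450)    # -> -90
normalise_rotation(45)      # -> 45, already in range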

normalise_values(image, min_value, max_value)

Source code in src\image\util.py
def normalise_values(image: np.ndarray, min_value: float, max_value: float) -> np.ndarray:
    image = (image.astype(np.float32) - min_value) / (max_value - min_value)
    return image.clip(0, 1)

pilmode_to_pixelinfo(mode)

Source code in src\image\util.py
def pilmode_to_pixelinfo(mode: str) -> tuple:
    pixelinfo = (np.uint8, 8, 1)
    mode_types = {
        'I': (np.uint32, 32, 1),
        'F': (np.float32, 32, 1),
        'RGB': (np.uint8, 24, 3),
        'RGBA': (np.uint8, 32, 4),
        'CMYK': (np.uint8, 32, 4),
        'YCbCr': (np.uint8, 24, 3),
        'LAB': (np.uint8, 24, 3),
        'HSV': (np.uint8, 24, 3),
    }
    if '16' in mode:
        pixelinfo = (np.uint16, 16, 1)
    elif '32' in mode:
        pixelinfo = (np.uint32, 32, 1)
    elif mode in mode_types:
        pixelinfo = mode_types[mode]
    pixelinfo = (np.dtype(pixelinfo[0]), pixelinfo[1])
    return pixelinfo

points_to_3d(points)

Source code in src\util.py
def points_to_3d(points):
    return [list(point) + [0] for point in points]

precise_resize(image, factors)

Source code in src\image\util.py
def precise_resize(image: np.ndarray, factors) -> np.ndarray:
    if image.ndim > len(factors):
        factors = list(factors) + [1]
    new_image = downscale_local_mean(np.asarray(image), tuple(factors)).astype(image.dtype)
    return new_image

print_dict(dct, indent=0)

Source code in src\util.py
def print_dict(dct: dict, indent: int = 0) -> str:
    s = ''
    if isinstance(dct, dict):
        for key, value in dct.items():
            s += '\n'
            if not isinstance(value, list):
                s += '\t' * indent + str(key) + ': '
            if isinstance(value, dict):
                s += print_dict(value, indent=indent + 1)
            elif isinstance(value, list):
                for v in value:
                    s += print_dict(v)
            else:
                s += str(value)
    else:
        s += str(dct)
    return s

print_hbytes(nbytes)

Source code in src\util.py
def print_hbytes(nbytes: int) -> str:
    exps = ['', 'K', 'M', 'G', 'T', 'P', 'E']
    div = 1024
    exp = 0

    while nbytes > div:
        nbytes /= div
        exp += 1
    if exp < len(exps):
        e = exps[exp]
    else:
        e = f'e{exp * 3}'
    return f'{nbytes:.1f}{e}B'

redimension_data(data, old_order, new_order, **indices)

Source code in src\image\util.py
def redimension_data(data, old_order, new_order, **indices):
    # able to provide optional dimension values e.g. t=0, z=0
    if new_order == old_order:
        return data

    new_data = data
    order = old_order
    # remove
    for o in old_order:
        if o not in new_order:
            index = order.index(o)
            dim_value = indices.get(o, 0)
            new_data = np.take(new_data, indices=dim_value, axis=index)
            order = order[:index] + order[index + 1:]
    # add
    for o in new_order:
        if o not in order:
            new_data = np.expand_dims(new_data, 0)
            order = o + order
    # move
    old_indices = [order.index(o) for o in new_order]
    new_indices = list(range(len(new_order)))
    new_data = np.moveaxis(new_data, old_indices, new_indices)
    return new_data
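
A sketch of typical use (shapes and dimension orders are illustrative):

import numpy as np

image = np.zeros((5, 480, 640))                     # a zyx stack
# insert singleton t and c axes: zyx -> tczyx
out = redimension_data(image, 'zyx', 'tczyx')       # shape (1, 1, 5, 480, 640)
# drop the z axis, keeping the plane at index z=2: zyx -> yx
plane = redimension_data(image, 'zyx', 'yx', z=2)   # shape (480, 640)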

reorder(items, old_order, new_order, default_value=0)

Source code in src\util.py
def reorder(items: list, old_order: str, new_order: str, default_value: int = 0) -> list:
    new_items = []
    for label in new_order:
        if label in old_order:
            item = items[old_order.index(label)]
        else:
            item = default_value
        new_items.append(item)
    return new_items

resize_image(image, new_size)

Source code in src\image\util.py
def resize_image(image, new_size):
    if not isinstance(new_size, (tuple, list, np.ndarray)):
        # use single value for width; apply aspect ratio
        size = np.flip(image.shape[:2])
        new_size = new_size, new_size * size[1] // size[0]
    return cv.resize(image, new_size)

retuple(chunks, shape)

Expand chunks to match shape.

E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028) return (3, 4, 5, 64, 64)

If chunks is an integer, it is applied to all dimensions, to match the behaviour of zarr-python.

Source code in src\util.py
def retuple(chunks, shape):
    # from ome-zarr-py
    """
    Expand chunks to match shape.

    E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028)
    return (3, 4, 5, 64, 64)

    If chunks is an integer, it is applied to all dimensions, to match
    the behaviour of zarr-python.
    """

    if isinstance(chunks, int):
        return tuple([chunks] * len(shape))

    dims_to_add = len(shape) - len(chunks)
    return *shape[:dims_to_add], *chunks
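
For instance (illustrative values):

retuple((64, 64), (3, 4, 5, 1028, 1028))    # -> (3, 4, 5, 64, 64)
retuple(64, (3, 1028, 1028))                # -> (64, 64, 64)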

round_significants(a, significant_digits)

Source code in src\util.py
def round_significants(a: float, significant_digits: int) -> float:
    if a != 0:
        round_decimals = significant_digits - int(np.floor(np.log10(abs(a)))) - 1
        return round(a, round_decimals)
    return a

show_image(image, title='', cmap=None)

Source code in src\image\util.py
def show_image(image, title='', cmap=None):
    nchannels = image.shape[2] if len(image.shape) > 2 else 1
    if cmap is None:
        cmap = 'gray' if nchannels == 1 else None
    plt.imshow(image, cmap=cmap)
    if title != '':
        plt.title(title)
    plt.show()

split_num_text(text)

Source code in src\util.py
def split_num_text(text: str) -> list:
    num_texts = []
    block = ''
    is_num0 = None
    if text is None:
        return None

    for c in text:
        is_num = (c.isnumeric() or c == '.')
        if is_num0 is not None and is_num != is_num0:
            num_texts.append(block)
            block = ''
        block += c
        is_num0 = is_num
    if block != '':
        num_texts.append(block)

    num_texts2 = []
    for block in num_texts:
        block = block.strip()
        try:
            block = float(block)
        except ValueError:
            pass
        if block not in ['', ' ', ',', '|']:    # skip separators and blocks left empty after stripping
            num_texts2.append(block)
    return num_texts2

split_numeric(text)

Source code in src\util.py
def split_numeric(text: str) -> list:
    num_parts = []
    parts = re.split(r'[_/\\.]', text)
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            num_parts.append(part)
    return num_parts

split_numeric_dict(text)

Source code in src\util.py
def split_numeric_dict(text: str) -> dict:
    num_parts = {}
    parts = re.split(r'[_/\\.]', text)
    parti = 0
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            index = num_span.start()
            label = part[:index]
            if label == '':
                label = parti
            num_parts[label] = num_span.group()
            parti += 1
    return num_parts

split_path(path)

Source code in src\util.py
def split_path(path: str) -> list:
    return os.path.normpath(path).split(os.path.sep)

split_value_unit_list(text)

Source code in src\util.py
def split_value_unit_list(text: str) -> list:
    value_units = []
    if text is None:
        return None

    items = split_num_text(text)
    if isinstance(items[-1], str):
        def_unit = items[-1]
    else:
        def_unit = ''

    i = 0
    while i < len(items):
        value = items[i]
        if i + 1 < len(items):
            unit = items[i + 1]
        else:
            unit = ''
        if not isinstance(value, str):
            if isinstance(unit, str):
                i += 1
            else:
                unit = def_unit
            value_units.append((value, unit))
        i += 1
    return value_units

uint8_image(image)

Source code in src\image\util.py
def uint8_image(image):
    source_dtype = image.dtype
    if source_dtype.kind == 'f':
        image = image * 255
    elif source_dtype.itemsize != 1:
        factor = 2 ** (8 * (source_dtype.itemsize - 1))
        image = image // factor
    return image.astype(np.uint8)

validate_transform(transform, max_rotation=None)

Source code in src\util.py
def validate_transform(transform, max_rotation=None):
    if transform is None:
        return False
    transform = np.array(transform)
    if np.any(np.isnan(transform)):
        return False
    if np.any(np.isinf(transform)):
        return False
    if np.linalg.det(transform) == 0:
        return False
    if max_rotation is not None and abs(normalise_rotation(get_rotation_from_transform(transform))) > max_rotation:
        return False
    return True

xyz_to_dict(xyz, axes='xyz')

Source code in src\util.py
def xyz_to_dict(xyz, axes='xyz'):
    dct = {dim: value for dim, value in zip(axes, xyz)}
    return dct

RegistrationMetrics

RegistrationMetrics

Bases: RegistrationMethod

Source code in src\registration_methods\RegistrationMetrics.py
class RegistrationMetrics(RegistrationMethod):
    def __init__(self, source_type, reg_function):
        super().__init__(source_type)
        self.reg_function = reg_function
        self.nccs = []
        self.ssims = []

    def registration(self, fixed_data: SpatialImage, moving_data: SpatialImage, **kwargs) -> dict:
        results = self.reg_function(fixed_data, moving_data, **kwargs)

        # TODO: move moving_data using returned transform - see phase_correlation_registration()
        fixed_data = fixed_data.squeeze()
        moving_data = moving_data.squeeze()
        self.nccs.append(calc_ncc(fixed_data, moving_data))
        self.ssims.append(calc_ssim(fixed_data, moving_data))

        return results
nccs = [] instance-attribute
reg_function = reg_function instance-attribute
ssims = [] instance-attribute
__init__(source_type, reg_function)
Source code in src\registration_methods\RegistrationMetrics.py
def __init__(self, source_type, reg_function):
    super().__init__(source_type)
    self.reg_function = reg_function
    self.nccs = []
    self.ssims = []
registration(fixed_data, moving_data, **kwargs)
Source code in src\registration_methods\RegistrationMetrics.py
def registration(self, fixed_data: SpatialImage, moving_data: SpatialImage, **kwargs) -> dict:
    results = self.reg_function(fixed_data, moving_data, **kwargs)

    # TODO: move moving_data using returned transform - see phase_correlation_registration()
    fixed_data = fixed_data.squeeze()
    moving_data = moving_data.squeeze()
    self.nccs.append(calc_ncc(fixed_data, moving_data))
    self.ssims.append(calc_ssim(fixed_data, moving_data))

    return results

util

apply_transform(points, transform)

Source code in src\util.py
def apply_transform(points, transform):
    new_points = []
    for point in points:
        point_len = len(point)
        while len(point) < len(transform):
            point = list(point) + [1]
        new_point = np.dot(point, np.transpose(np.array(transform)))
        new_points.append(new_point[:point_len])
    return new_points

check_round_significants(a, significant_digits)

Source code in src\util.py
def check_round_significants(a: float, significant_digits: int) -> float:
    rounded = round_significants(a, significant_digits)
    if a != 0:
        dif = 1 - rounded / a
    else:
        dif = rounded - a
    if abs(dif) < 10 ** -significant_digits:
        return rounded
    return a

convert_rational_value(value)

Source code in src\util.py
def convert_rational_value(value) -> float:
    if value is not None and isinstance(value, tuple):
        if value[1] == 0:
            # avoid division by zero for rationals like (0, 0)
            value = value[0]
        else:
            value = value[0] / value[1]
    return value

convert_to_um(value, unit)

Source code in src\util.py
def convert_to_um(value, unit):
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    return value * conversions.get(unit, 1)

create_transform(center, angle, matrix_size=3)

Source code in src\util.py
def create_transform(center, angle, matrix_size=3):
    if isinstance(center, dict):
        center = dict_to_xyz(center)
    if len(center) == 2:
        center = np.array(list(center) + [0])
    if angle is None:
        angle = 0
    r = Rotation.from_euler('z', angle, degrees=True)
    t = center - r.apply(center, inverse=True)
    transform = np.eye(matrix_size)
    transform[:3, :3] = np.transpose(r.as_matrix())
    transform[:3, -1] += t
    return transform

create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0))

Source code in src\util.py
def create_transform0(center=(0, 0), angle=0, scale=1, translate=(0, 0)):
    transform = cv.getRotationMatrix2D(center[:2], angle, scale)
    transform[:, 2] += translate
    if len(transform) == 2:
        transform = np.vstack([transform, [0, 0, 1]])   # create 3x3 matrix
    return transform

desc_to_dict(desc)

Source code in src\util.py
def desc_to_dict(desc: str) -> dict:
    desc_dict = {}
    if desc.startswith('{'):
        try:
            metadata = ast.literal_eval(desc)
            return metadata
        except:
            pass
    for item in re.split(r'[\r\n\t|]', desc):
        item_sep = '='
        if ':' in item:
            item_sep = ':'
        if item_sep in item:
            items = item.split(item_sep)
            key = items[0].strip()
            value = items[1].strip()
            if value.lower() in ('true', 'false'):
                value = (value.lower() == 'true')   # bool(value) would be True for any non-empty string
            else:
                for dtype in (int, float):
                    try:
                        value = dtype(value)
                        break
                    except ValueError:
                        pass
            desc_dict[key] = value
    return desc_dict

dict_to_xyz(dct, keys='xyz')

Source code in src\util.py
def dict_to_xyz(dct, keys='xyz'):
    return [dct[key] for key in keys if key in dct]

dir_regex(pattern)

Source code in src\util.py
def dir_regex(pattern):
    files = []
    for pattern_item in ensure_list(pattern):
        files.extend(glob.glob(pattern_item, recursive=True))
    files_sorted = sorted(files, key=lambda file: find_all_numbers(get_filetitle(file)))
    return files_sorted

draw_edge_filter(bounds)

Source code in src\util.py
def draw_edge_filter(bounds):
    out_image = np.zeros(np.flip(bounds))
    y, x = np.where(out_image == 0)
    points = np.transpose([x, y])

    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) * 10, 0, 1)
    return position_weights.reshape(np.flip(bounds))

ensure_list(x)

Source code in src\util.py
def ensure_list(x) -> list:
    if x is None:
        return []
    elif isinstance(x, list):
        return x
    else:
        return [x]

eval_context(data, key, default_value, context)

Source code in src\util.py
def eval_context(data, key, default_value, context):
    value = data.get(key, default_value)
    if isinstance(value, str):
        try:
            value = value.format_map(context)
        except:
            pass
        try:
            value = eval(value, context)
        except:
            pass
    return value

export_csv(filename, data, header=None)

Source code in src\util.py
def export_csv(filename, data, header=None):
    with open(filename, 'w', encoding='utf8', newline='') as file:
        csvwriter = csv.writer(file)
        if header is not None:
            csvwriter.writerow(header)
        for row in data:
            csvwriter.writerow(row)

export_json(filename, data)

Source code in src\util.py
def export_json(filename, data):
    with open(filename, 'w', encoding='utf8') as file:
        json.dump(data, file, indent=4)

filter_dict(dict0)

Source code in src\util.py
def filter_dict(dict0: dict) -> dict:
    new_dict = {}
    for key, value0 in dict0.items():
        if value0 is not None:
            values = []
            for value in ensure_list(value0):
                if isinstance(value, dict):
                    value = filter_dict(value)
                values.append(value)
            if len(values) == 1:
                values = values[0]
            new_dict[key] = values
    return new_dict

filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5)

Source code in src\util.py
def filter_edge_points(points, bounds, filter_factor=0.1, threshold=0.5):
    center = np.array(bounds) / 2
    dist_center = np.abs(points / center - 1)
    position_weights = np.clip((1 - np.max(dist_center, axis=-1)) / filter_factor, 0, 1)
    order_weights = 1 - np.array(range(len(points))) / len(points) / 2
    weights = position_weights * order_weights
    return weights > threshold

find_all_numbers(text)

Source code in src\util.py
def find_all_numbers(text: str) -> list:
    return list(map(int, re.findall(r'\d+', text)))

get_center(data, offset=(0, 0))

Source code in src\util.py
def get_center(data, offset=(0, 0)):
    moments = get_moments(data, offset=offset)
    if moments['m00'] != 0:
        center = get_moments_center(moments)
    else:
        center = np.mean(data, 0).flatten()  # close approximation
    return center.astype(np.float32)

get_center_from_transform(transform)

Source code in src\util.py
def get_center_from_transform(transform):
    # from opencv:
    # t0 = (1-alpha) * cx - beta * cy
    # t1 = beta * cx + (1-alpha) * cy
    # where
    # alpha = cos(angle) * scale
    # beta = sin(angle) * scale
    # isolate cx and cy:
    t0, t1 = transform[:2, 2]
    scale = 1
    angle = np.arctan2(transform[0][1], transform[0][0])
    alpha = np.cos(angle) * scale
    beta = np.sin(angle) * scale
    cx = (t1 + t0 * (1 - alpha) / beta) / (beta + (1 - alpha) ** 2 / beta)
    cy = ((1 - alpha) * cx - t0) / beta
    return cx, cy
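
A round-trip with create_transform0 illustrates the inversion; note that the isolation above assumes scale = 1 and a non-zero rotation (beta != 0). Values are illustrative:

transform = create_transform0(center=(100, 50), angle=30)
cx, cy = get_center_from_transform(transform)   # recovers (100, 50) up to floating point error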

get_default(x, default)

Source code in src\util.py
def get_default(x, default):
    return default if x is None else x

get_filetitle(filename)

Source code in src\util.py
def get_filetitle(filename: str) -> str:
    filebase = os.path.basename(filename)
    title = os.path.splitext(filebase)[0]
    if title.endswith('.ome'):
        # note: rstrip('.ome') would strip any trailing 'o', 'm', 'e' or '.' characters
        title = title[:-len('.ome')]
    return title

get_mean_nn_distance(points1, points2)

Source code in src\util.py
def get_mean_nn_distance(points1, points2):
    return np.mean([get_nn_distance(points1), get_nn_distance(points2)])

get_moments(data, offset=(0, 0))

Source code in src\util.py
def get_moments(data, offset=(0, 0)):
    moments = cv.moments((np.array(data) + offset).astype(np.float32))    # doesn't work for float64!
    return moments

get_moments_center(moments, offset=(0, 0))

Source code in src\util.py
def get_moments_center(moments, offset=(0, 0)):
    return np.array([moments['m10'], moments['m01']]) / moments['m00'] + np.array(offset)

get_nn_distance(points0)

Source code in src\util.py
def get_nn_distance(points0):
    points = list(set(map(tuple, points0)))     # get unique points
    if len(points) >= 2:
        tree = KDTree(points, leaf_size=2)
        dist, ind = tree.query(points, k=2)
        nn_distance = np.median(dist[:, 1])
    else:
        nn_distance = 1
    return nn_distance

get_orthogonal_pairs(origins, image_size_um)

Get pairs of orthogonal neighbors from a list of tiles. Tiles don't have to be placed on a regular grid.

Source code in src\util.py
def get_orthogonal_pairs(origins, image_size_um):
    """
    Get pairs of orthogonal neighbors from a list of tiles.
    Tiles don't have to be placed on a regular grid.
    """
    pairs = []
    angles = []
    z_positions = [pos[0] for pos in origins if len(pos) == 3]
    ordered_z = sorted(set(z_positions))
    is_mixed_3dstack = len(ordered_z) < len(z_positions)
    for i, j in np.transpose(np.triu_indices(len(origins), 1)):
        origini = np.array(origins[i])
        originj = np.array(origins[j])
        if is_mixed_3dstack:
            # ignore z value for distance
            distance = math.dist(origini[-2:], originj[-2:])
            min_distance = max(image_size_um[-2:])
            z_i, z_j = origini[0], originj[0]
            is_same_z = (z_i == z_j)
            is_close_z = abs(ordered_z.index(z_i) - ordered_z.index(z_j)) <= 1
            if not is_same_z:
                # for tiles in different z stacks, require greater overlap
                min_distance *= 0.8
            ok = (distance < min_distance and is_close_z)
        else:
            distance = math.dist(origini, originj)
            min_distance = max(image_size_um)
            ok = (distance < min_distance)
        if ok:
            pairs.append((i, j))
            vector = origini - originj
            angle = math.degrees(math.atan2(vector[1], vector[0]))
            if distance < min(image_size_um):
                angle += 90
            while angle < -90:
                angle += 180
            while angle > 90:
                angle -= 180
            angles.append(angle)
    return pairs, angles
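
For a 2x2 grid of 100x100 µm tiles with 10% overlap (illustrative values), the four edge-sharing neighbours are paired, while the diagonal pairs are rejected because their distance exceeds the tile size:

origins = [(0, 0), (90, 0), (0, 90), (90, 90)]      # tile origins in µm
pairs, angles = get_orthogonal_pairs(origins, image_size_um=(100, 100))
# pairs -> [(0, 1), (0, 2), (1, 3), (2, 3)]; diagonals (0, 3) and (1, 2) are skipped
# angles holds each pair's orientation in degrees, folded into [-90, 90]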

get_rotation_from_transform(transform)

Source code in src\util.py
def get_rotation_from_transform(transform):
    rotation = np.rad2deg(np.arctan2(transform[0][1], transform[0][0]))
    return rotation

get_scale_from_transform(transform)

Source code in src\util.py
def get_scale_from_transform(transform):
    scale = np.mean(np.linalg.norm(transform, axis=0)[:-1])
    return scale

get_translation_from_transform(transform)

Source code in src\util.py
def get_translation_from_transform(transform):
    ndim = len(transform) - 1
    #translation = transform[:ndim, ndim]
    translation = apply_transform([[0] * ndim], transform)[0]
    return translation

get_unique_file_labels(filenames)

Source code in src\util.py
def get_unique_file_labels(filenames: list) -> list:
    file_labels = []
    file_parts = []
    label_indices = set()
    last_parts = None
    for filename in filenames:
        parts = split_numeric(filename)
        if len(parts) == 0:
            # no numeric parts found: fall back to the raw filename
            parts = filename
        file_parts.append(parts)
        if last_parts is not None:
            for parti, (part1, part2) in enumerate(zip(last_parts, parts)):
                if part1 != part2:
                    label_indices.add(parti)
        last_parts = parts
    label_indices = sorted(list(label_indices))

    for file_part in file_parts:
        file_label = '_'.join([file_part[i] for i in label_indices])
        file_labels.append(file_label)

    if len(set(file_labels)) < len(file_labels):
        # fallback for duplicate labels
        file_labels = [get_filetitle(filename) for filename in filenames]

    return file_labels

get_value_units_micrometer(value_units0)

Source code in src\util.py
def get_value_units_micrometer(value_units0: list|dict) -> list|dict|None:
    conversions = {
        'nm': 1e-3,
        'µm': 1, 'um': 1, 'micrometer': 1, 'micron': 1,
        'mm': 1e3, 'millimeter': 1e3,
        'cm': 1e4, 'centimeter': 1e4,
        'm': 1e6, 'meter': 1e6
    }
    if value_units0 is None:
        return None

    if isinstance(value_units0, dict):
        values_um = {}
        for dim, value_unit in value_units0.items():
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um[dim] = value_um
    else:
        values_um = []
        for value_unit in value_units0:
            if isinstance(value_unit, (list, tuple)):
                value_um = value_unit[0] * conversions.get(value_unit[1], 1)
            else:
                value_um = value_unit
            values_um.append(value_um)
    return values_um

import_csv(filename)

Source code in src\util.py
def import_csv(filename):
    with open(filename, encoding='utf8') as file:
        data = list(csv.reader(file))   # materialise the rows: csv.reader is lazy and the file closes on exit
    return data

import_json(filename)

Source code in src\util.py
def import_json(filename):
    with open(filename, encoding='utf8') as file:
        data = json.load(file)
    return data

import_metadata(content, fields=None, input_path=None)

Source code in src\util.py
def import_metadata(content, fields=None, input_path=None):
    # return dict[id] = {values}
    if isinstance(content, str):
        ext = os.path.splitext(content)[1].lower()
        if input_path:
            if isinstance(input_path, list):
                input_path = input_path[0]
            content = os.path.normpath(os.path.join(os.path.dirname(input_path), content))
        if ext == '.csv':
            content = import_csv(content)
        elif ext in ['.json', '.ome.json']:
            content = import_json(content)
    if fields is not None:
        content = [[data[field] for field in fields] for data in content]
    return content

normalise_rotated_positions(positions0, rotations0, size, center)

Source code in src\util.py
def normalise_rotated_positions(positions0, rotations0, size, center):
    # in [xy(z)]
    positions = []
    rotations = []
    _, angles = get_orthogonal_pairs(positions0, size)
    for position0, rotation in zip(positions0, rotations0):
        if rotation is None and len(angles) > 0:
            rotation = -np.mean(angles)
        angle = -rotation if rotation is not None else None
        transform = create_transform(center=center, angle=angle, matrix_size=4)
        position = apply_transform([position0], transform)[0]
        positions.append(position)
        rotations.append(rotation)
    return positions, rotations

normalise_rotation(rotation)

Normalise rotation to be in the range [-180, 180].

Source code in src\util.py
def normalise_rotation(rotation):
    """
    Normalise rotation to be in the range [-180, 180].
    """
    while rotation < -180:
        rotation += 360
    while rotation > 180:
        rotation -= 360
    return rotation

points_to_3d(points)

Source code in src\util.py
def points_to_3d(points):
    return [list(point) + [0] for point in points]

print_dict(dct, indent=0)

Source code in src\util.py
def print_dict(dct: dict, indent: int = 0) -> str:
    s = ''
    if isinstance(dct, dict):
        for key, value in dct.items():
            s += '\n'
            if not isinstance(value, list):
                s += '\t' * indent + str(key) + ': '
            if isinstance(value, dict):
                s += print_dict(value, indent=indent + 1)
            elif isinstance(value, list):
                for v in value:
                    s += print_dict(v)
            else:
                s += str(value)
    else:
        s += str(dct)
    return s

print_hbytes(nbytes)

Source code in src\util.py
def print_hbytes(nbytes: int) -> str:
    exps = ['', 'K', 'M', 'G', 'T', 'P', 'E']
    div = 1024
    exp = 0

    while nbytes > div:
        nbytes /= div
        exp += 1
    if exp < len(exps):
        e = exps[exp]
    else:
        e = f'e{exp * 3}'
    return f'{nbytes:.1f}{e}B'

reorder(items, old_order, new_order, default_value=0)

Source code in src\util.py
def reorder(items: list, old_order: str, new_order: str, default_value: int = 0) -> list:
    new_items = []
    for label in new_order:
        if label in old_order:
            item = items[old_order.index(label)]
        else:
            item = default_value
        new_items.append(item)
    return new_items

retuple(chunks, shape)

Expand chunks to match shape.

E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028) return (3, 4, 5, 64, 64)

If chunks is an integer, it is applied to all dimensions, to match the behaviour of zarr-python.

Source code in src\util.py
def retuple(chunks, shape):
    # from ome-zarr-py
    """
    Expand chunks to match shape.

    E.g. if chunks is (64, 64) and shape is (3, 4, 5, 1028, 1028)
    return (3, 4, 5, 64, 64)

    If chunks is an integer, it is applied to all dimensions, to match
    the behaviour of zarr-python.
    """

    if isinstance(chunks, int):
        return tuple([chunks] * len(shape))

    dims_to_add = len(shape) - len(chunks)
    return *shape[:dims_to_add], *chunks

round_significants(a, significant_digits)

Source code in src\util.py
def round_significants(a: float, significant_digits: int) -> float:
    if a != 0:
        round_decimals = significant_digits - int(np.floor(np.log10(abs(a)))) - 1
        return round(a, round_decimals)
    return a

split_num_text(text)

Source code in src\util.py
def split_num_text(text: str) -> list:
    num_texts = []
    block = ''
    is_num0 = None
    if text is None:
        return None

    for c in text:
        is_num = (c.isnumeric() or c == '.')
        if is_num0 is not None and is_num != is_num0:
            num_texts.append(block)
            block = ''
        block += c
        is_num0 = is_num
    if block != '':
        num_texts.append(block)

    num_texts2 = []
    for block in num_texts:
        block = block.strip()
        try:
            block = float(block)
        except ValueError:
            pass
        if block not in ['', ' ', ',', '|']:    # skip separators and blocks left empty after stripping
            num_texts2.append(block)
    return num_texts2

split_numeric(text)

Source code in src\util.py
def split_numeric(text: str) -> list:
    num_parts = []
    parts = re.split(r'[_/\\.]', text)
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            num_parts.append(part)
    return num_parts

split_numeric_dict(text)

Source code in src\util.py
def split_numeric_dict(text: str) -> dict:
    num_parts = {}
    parts = re.split(r'[_/\\.]', text)
    parti = 0
    for part in parts:
        num_span = re.search(r'\d+', part)
        if num_span:
            index = num_span.start()
            label = part[:index]
            if label == '':
                label = parti
            num_parts[label] = num_span.group()
            parti += 1
    return num_parts

split_path(path)

Source code in src\util.py
def split_path(path: str) -> list:
    return os.path.normpath(path).split(os.path.sep)

split_value_unit_list(text)

Source code in src\util.py
def split_value_unit_list(text: str) -> list:
    value_units = []
    if text is None:
        return None

    items = split_num_text(text)
    if isinstance(items[-1], str):
        def_unit = items[-1]
    else:
        def_unit = ''

    i = 0
    while i < len(items):
        value = items[i]
        if i + 1 < len(items):
            unit = items[i + 1]
        else:
            unit = ''
        if not isinstance(value, str):
            if isinstance(unit, str):
                i += 1
            else:
                unit = def_unit
            value_units.append((value, unit))
        i += 1
    return value_units

validate_transform(transform, max_rotation=None)

Source code in src\util.py
def validate_transform(transform, max_rotation=None):
    if transform is None:
        return False
    transform = np.array(transform)
    if np.any(np.isnan(transform)):
        return False
    if np.any(np.isinf(transform)):
        return False
    if np.linalg.det(transform) == 0:
        return False
    if max_rotation is not None and abs(normalise_rotation(get_rotation_from_transform(transform))) > max_rotation:
        return False
    return True

xyz_to_dict(xyz, axes='xyz')

Source code in src\util.py
def xyz_to_dict(xyz, axes='xyz'):
    dct = {dim: value for dim, value in zip(axes, xyz)}
    return dct