diff --git a/nanshe/__init__.py b/nanshe/__init__.py
index b9406550..9c66d63b 100644
--- a/nanshe/__init__.py
+++ b/nanshe/__init__.py
@@ -210,13 +210,13 @@
 #     "viewer"
 ]
 
-import box
-import converter
-import io
-import imp
-import learner
-import registerer
-import syn
-import util
-# import viewer  # Must be commented as there is some segfault
-                 # coming from Volumina.
+from nanshe import box
+from nanshe import converter
+from nanshe import io
+from nanshe import imp
+from nanshe import learner
+from nanshe import registerer
+from nanshe import syn
+from nanshe import util
+# from nanshe import viewer  # Must be commented as there is some segfault
+                             # coming from Volumina.
diff --git a/nanshe/box/__init__.py b/nanshe/box/__init__.py
index e4654700..b9f69a0d 100644
--- a/nanshe/box/__init__.py
+++ b/nanshe/box/__init__.py
@@ -18,4 +18,4 @@
 __all__ = ["spams_sandbox"]
 
-import spams_sandbox
+from nanshe.box import spams_sandbox
diff --git a/nanshe/imp/__init__.py b/nanshe/imp/__init__.py
index 95aaa034..bd956ba9 100644
--- a/nanshe/imp/__init__.py
+++ b/nanshe/imp/__init__.py
@@ -15,7 +15,7 @@
 __all__ = ["filters", "registration", "renorm", "segment"]
 
-import filters
-import registration
-import renorm
-import segment
+from nanshe.imp import filters
+from nanshe.imp import registration
+from nanshe.imp import renorm
+from nanshe.imp import segment
diff --git a/nanshe/imp/filters/__init__.py b/nanshe/imp/filters/__init__.py
index 7b3dc94c..0552f1bc 100644
--- a/nanshe/imp/filters/__init__.py
+++ b/nanshe/imp/filters/__init__.py
@@ -16,6 +16,6 @@
 __all__ = ["noise", "masks", "wavelet"]
 
-import masks
-import noise
-import wavelet
+from nanshe.imp.filters import masks
+from nanshe.imp.filters import noise
+from nanshe.imp.filters import wavelet
diff --git a/nanshe/imp/filters/noise.py b/nanshe/imp/filters/noise.py
index 51815c0b..0153eb9f 100644
--- a/nanshe/imp/filters/noise.py
+++ b/nanshe/imp/filters/noise.py
@@ -57,19 +57,19 @@ def estimate_noise(input_array, significance_threshold=3.0):
         >>> estimate_noise(numpy.eye(2), 3)
         0.5
 
-        >>> round(estimate_noise(numpy.eye(3), 3), 3)
+        >>> round(float(estimate_noise(numpy.eye(3), 3)), 3)
         0.471
 
         >>> numpy.random.seed(10)
-        >>> round(estimate_noise(numpy.random.random((2000,2000)), 1), 3)
+        >>> round(float(estimate_noise(numpy.random.random((2000,2000)), 1)), 3)
         0.167
 
         >>> numpy.random.seed(10)
-        >>> round(estimate_noise(numpy.random.random((2000,2000)), 2), 3)
+        >>> round(float(estimate_noise(numpy.random.random((2000,2000)), 2)), 3)
         0.289
 
         >>> numpy.random.seed(10)
-        >>> round(estimate_noise(numpy.random.random((2000,2000)), 3), 3)
+        >>> round(float(estimate_noise(numpy.random.random((2000,2000)), 3)), 3)
         0.289
     """
diff --git a/nanshe/imp/filters/wavelet.py b/nanshe/imp/filters/wavelet.py
index bae2e6a1..5823d326 100644
--- a/nanshe/imp/filters/wavelet.py
+++ b/nanshe/imp/filters/wavelet.py
@@ -30,6 +30,7 @@
 import vigra
 
 from nanshe.io import hdf5
+from nanshe.util.iters import irange
 from nanshe.util.xnumpy import binomial_coefficients
 
 
@@ -163,7 +164,7 @@ def binomial_1D_vigra_kernel(i, n=4, border_treatment=vigra.filters.BorderTreatm
     h_kern = binomial_1D_array_kernel(i, n)
 
     # Determine the kernel center
-    h_kern_half_width = (h_kern.size - 1) / 2
+    h_kern_half_width = (h_kern.size - 1) // 2
 
     # Default kernel
     k = vigra.filters.Kernel1D()
@@ -405,7 +406,7 @@ def transform(im0,
 
     imCur = im0.astype(numpy.float32)
 
-    for i in xrange(1, scale.max() + 1):
+    for i in irange(1, scale.max() + 1):
        if include_intermediates:
            imPrev = imCur
            imOut[i] = imOut[i - 1]
@@ -415,7 +416,7 @@ def transform(im0,
 
        h_ker = binomial_1D_vigra_kernel(i)
 
-        for d in xrange(len(scale)):
+        for d in irange(len(scale)):
            if i <= scale[d]:
                vigra.filters.convolveOneDimension(imCur, d, h_ker, out=imCur)
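Two small Python 3 fixes recur in the noise and wavelet hunks above: integer `/` becomes floor division `//` (plain `/` is true division on Python 3 and would turn the kernel half-width into a float), and doctest expressions are wrapped in `float()` so NumPy scalars print identically under both interpreters. A minimal standalone sketch of the division fix, not nanshe code:

    # Python 2: 5 / 2 == 2 (int); Python 3: 5 / 2 == 2.5 (float).
    # Floor division returns an int on both, which an array index requires.
    kernel_size = 11
    half_width = (kernel_size - 1) // 2   # 5 on Python 2 and Python 3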
diff --git a/nanshe/imp/registration.py b/nanshe/imp/registration.py
index 51a53703..f8b2cea8 100644
--- a/nanshe/imp/registration.py
+++ b/nanshe/imp/registration.py
@@ -347,7 +347,7 @@ def register_mean_offsets(frames2reg,
     space_shift_min[space_shift_min == 0] = None
     space_shift_min = tuple(space_shift_min)
     reg_frames_slice = tuple(
-        slice(_1, _2) for _1, _2 in itertools.izip(
+        slice(_1, _2) for _1, _2 in iters.izip(
             space_shift_max, space_shift_min
         )
     )
@@ -502,7 +502,7 @@ def translate_fourier(frame_fft, shift):
         array([[  0.,   1.,   2.,   3.],
                [  4.,   5.,   6.,   7.],
                [  8.,   9.,  10.,  11.]])
 
-        >>> af = fft.fftn(a, axes=tuple(xrange(a.ndim)))
+        >>> af = fft.fftn(a, axes=tuple(iters.irange(a.ndim)))
 
         >>> numpy.around(af, decimals=10)
         array([[ 66. +0.j        ,  -6. +6.j        ,  -6. +0.j        ,  -6. -6.j        ],
                [-24.+13.85640646j,   0. +0.j        ,   0. +0.j        ,   0. +0.j        ],
@@ -517,7 +517,7 @@ def translate_fourier(frame_fft, shift):
                [ 24.-13.85640646j,   0. -0.j        ,   0. +0.j        ,   0. +0.j        ]])
 
         >>> fft.ifftn(
-        ...     atf, axes=tuple(xrange(a.ndim))
+        ...     atf, axes=tuple(iters.irange(a.ndim))
         ... ).real.round().astype(int).astype(float)
         array([[  9.,  10.,  11.,   8.],
                [  1.,   2.,   3.,   0.],
@@ -532,7 +532,7 @@ def translate_fourier(frame_fft, shift):
 
        >>> fft.ifftn(
-        ...     atf, axes=tuple(xrange(1, a.ndim))
+        ...     atf, axes=tuple(iters.irange(1, a.ndim))
        ... ).real.round().astype(int).astype(float)
        array([[[  9.,  10.,  11.,   8.],
                [  1.,   2.,   3.,   0.],
diff --git a/nanshe/imp/renorm.py b/nanshe/imp/renorm.py
index 4728c9e2..ddbccf11 100644
--- a/nanshe/imp/renorm.py
+++ b/nanshe/imp/renorm.py
@@ -25,6 +25,8 @@
 # Generally useful and fast to import so done immediately.
 import numpy
 
+from nanshe.util import iters
+
 # Need in order to have logging information no matter what.
 from nanshe.util import prof
 
@@ -225,7 +227,7 @@ def renormalized_images(input_array, ord=2, output_array=None):
     # does not support the axis keyword. So, we must use a for loop.
     # Take each image at each time and turn the image into a vector.
     # Then, find the norm and divide each image by this norm.
-    for i in xrange(output_array.shape[0]):
+    for i in iters.irange(output_array.shape[0]):
         output_array_i = output_array[i]
         output_array_i_norm = numpy.linalg.norm(
             output_array_i.ravel(), ord=ord
diff --git a/nanshe/imp/segment.py b/nanshe/imp/segment.py
index 22584f7e..4c1589d7 100644
--- a/nanshe/imp/segment.py
+++ b/nanshe/imp/segment.py
@@ -82,17 +82,17 @@
 from nanshe.util import xnumpy
 
 # Short function to process image data.
-import filters.masks
-from renorm import zeroed_mean_images, renormalized_images
+from nanshe.imp import filters
+from nanshe.imp.renorm import zeroed_mean_images, renormalized_images
 
 # Algorithms to register the data.
-import registration
+from nanshe.imp import registration
 
 # To remove noise from the basis images
-from filters.noise import estimate_noise, significant_mask
+from nanshe.imp.filters.noise import estimate_noise, significant_mask
 
 # Wavelet transformation operations
-from filters import wavelet
+from nanshe.imp.filters import wavelet
 
 from nanshe.io import hdf5
 
@@ -148,7 +148,7 @@ def remove_zeroed_lines(new_data,
     zero_masks_dilated = numpy.zeros(new_data.shape, dtype=bool)
     zero_masks_outline = numpy.zeros(new_data.shape, dtype=bool)
 
-    for i in xrange(new_data.shape[0]):
+    for i in iters.irange(new_data.shape[0]):
         new_data_i = new_data[i]
         zero_mask_i = (new_data_i == 0)
 
@@ -162,7 +162,7 @@ def remove_zeroed_lines(new_data,
             zero_mask_i
         )
 
-        for j in xrange(1, zero_mask_i_num_labels + 1):
+        for j in iters.irange(1, zero_mask_i_num_labels + 1):
             zero_mask_i_labeled_j = (zero_mask_i_labeled == j)
 
             zero_mask_i_labeled_j_dilated = filters.masks.binary_dilation(
@@ -397,7 +397,7 @@ def estimate_f0(new_data,
         vigra.filters.BorderTreatmentMode.BORDER_TREATMENT_REFLECT
     )
 
-    for d in xrange(1, new_data_f0_estimation.ndim):
+    for d in iters.irange(1, new_data_f0_estimation.ndim):
        vigra.filters.convolveOneDimension(
            new_data_f0_estimation,
            d,
@@ -1113,7 +1113,7 @@ def region_properties_scikit_image(new_label_image, *args, **kwargs):
             properties = ["area", "centroid"]
 
     if ((properties == "all") or (properties is None)):
-        properties = region_properties_type_dict.keys()
+        properties = list(region_properties_type_dict.keys())
 
     intensity_image = None
     if (len(args)) and (args[0]):
@@ -1154,7 +1154,7 @@ def region_properties_scikit_image(new_label_image, *args, **kwargs):
     )
 
     new_label_image_props_with_arrays = []
-    for i in xrange(len(new_label_image_props)):
+    for i in iters.irange(len(new_label_image_props)):
         new_label_image_props_with_arrays.append({})
 
         for each_key in properties:
@@ -1192,7 +1192,7 @@ def region_properties_scikit_image(new_label_image, *args, **kwargs):
 
     # Store the values to place in NumPy structured array in order.
     new_label_image_props_with_arrays_values = []
-    for j in xrange(len(new_label_image_props_with_arrays)):
+    for j in iters.irange(len(new_label_image_props_with_arrays)):
         # Add all values in order of keys from the dictionary.
         new_label_image_props_with_arrays_values.append([])
         for each_new_label_image_props_with_arrays_dtype in new_label_image_props_with_arrays_dtype:
@@ -1464,7 +1464,7 @@ def region_properties_vigra(new_label_image, *args, **kwargs):
             properties = ["area", "centroid"]
 
     if ((properties == "all") or (properties is None)):
-        properties = region_properties_type_dict.keys()
+        properties = list(region_properties_type_dict.keys())
 
     intensity_image = None
     if (len(args)) and (args[0]):
@@ -1550,7 +1550,7 @@ def __getitem__(self, item):
                 new_label_image, intensity_image)
 
     new_label_image_props_with_arrays = []
-    for i in xrange(len(new_label_image_props)):
+    for i in iters.irange(len(new_label_image_props)):
         new_label_image_props_with_arrays.append({})
 
         for each_key in properties:
@@ -1587,7 +1587,7 @@ def __getitem__(self, item):
 
     # Store the values to place in NumPy structured array in order.
     new_label_image_props_with_arrays_values = []
-    for j in xrange(len(new_label_image_props_with_arrays)):
+    for j in iters.irange(len(new_label_image_props_with_arrays)):
         # Add all values in order of keys from the dictionary.
         new_label_image_props_with_arrays_values.append([])
         for each_new_label_image_props_with_arrays_dtype in new_label_image_props_with_arrays_dtype:
@@ -1719,28 +1719,28 @@ def get_neuron_dtype(shape, dtype):
     Examples:
         >>> get_neuron_dtype(
         ...     (3,), numpy.float64
-        ... ) #doctest: +NORMALIZE_WHITESPACE
-        [('mask', <type 'numpy.bool_'>, (3,)),
-         ('contour', <type 'numpy.bool_'>, (3,)),
-         ('image', <type 'numpy.float64'>, (3,)),
-         ('area', <type 'numpy.float64'>),
-         ('max_F', <type 'numpy.float64'>),
-         ('gaussian_mean', <type 'numpy.float64'>, (1,)),
-         ('gaussian_cov', <type 'numpy.float64'>, (1, 1)),
-         ('centroid', <type 'numpy.float64'>, (1,))]
+        ... ) #doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
+        [('mask', <... 'numpy.bool_'>, (3,)),
+         ('contour', <... 'numpy.bool_'>, (3,)),
+         ('image', <... 'numpy.float64'>, (3,)),
+         ('area', <... 'numpy.float64'>),
+         ('max_F', <... 'numpy.float64'>),
+         ('gaussian_mean', <... 'numpy.float64'>, (1,)),
+         ('gaussian_cov', <... 'numpy.float64'>, (1, 1)),
+         ('centroid', <... 'numpy.float64'>, (1,))]
 
         >>> get_neuron_dtype(
         ...     (2, 3), numpy.float64
-        ... ) #doctest: +NORMALIZE_WHITESPACE
-        [('mask', <type 'numpy.bool_'>, (2, 3)),
-         ('contour', <type 'numpy.bool_'>, (2, 3)),
-         ('image', <type 'numpy.float64'>, (2, 3)),
-         ('area', <type 'numpy.float64'>),
-         ('max_F', <type 'numpy.float64'>),
-         ('gaussian_mean', <type 'numpy.float64'>, (2,)),
-         ('gaussian_cov', <type 'numpy.float64'>, (2, 2)),
-         ('centroid', <type 'numpy.float64'>, (2,))]
+        ... ) #doctest: +NORMALIZE_WHITESPACE, +ELLIPSIS
+        [('mask', <... 'numpy.bool_'>, (2, 3)),
+         ('contour', <... 'numpy.bool_'>, (2, 3)),
+         ('image', <... 'numpy.float64'>, (2, 3)),
+         ('area', <... 'numpy.float64'>),
+         ('max_F', <... 'numpy.float64'>),
+         ('gaussian_mean', <... 'numpy.float64'>, (2,)),
+         ('gaussian_cov', <... 'numpy.float64'>, (2, 2)),
+         ('centroid', <... 'numpy.float64'>, (2,))]
     """
 
     ndim = len(shape)
@@ -2398,7 +2398,7 @@ def remove_low_intensity_local_maxima(local_maxima,
     low_intensities__local_maxima_label_mask__to_remove = numpy.zeros(
         local_maxima.props.shape, dtype=bool
     )
-    for i in xrange(len(local_maxima.props)):
+    for i in iters.irange(len(local_maxima.props)):
         # Get the region with the label matching the maximum
         each_region_image_wavelet_mask = (
             local_maxima.label_image == local_maxima.props["label"][i]
@@ -2466,7 +2466,7 @@ def remove_too_close_local_maxima(local_maxima,
 
     # Find the distance between every local max (efficiently)
     local_maxima_pairs = numpy.array(
-        list(itertools.combinations(xrange(len(local_maxima.props)), 2)))
+        list(itertools.combinations(iters.irange(len(local_maxima.props)), 2)))
     local_maxima_distances = scipy.spatial.distance.pdist(
         local_maxima.props["local_max"], metric="euclidean")
 
@@ -2556,7 +2556,7 @@ def wavelet_denoising(new_image,
     )
 
     if wavelet_denoising.recorders.array_debug_recorder:
-        for i in xrange(len(new_wavelet_transformed_image)):
+        for i in iters.irange(len(new_wavelet_transformed_image)):
             wavelet_denoising.recorders.array_debug_recorder["new_wavelet_transformed_image"] = \
                 new_wavelet_transformed_image[i][None]
 
@@ -2569,7 +2569,7 @@ def wavelet_denoising(new_image,
     )
 
     if wavelet_denoising.recorders.array_debug_recorder:
-        for i in xrange(len(new_wavelet_transformed_image_significant_mask)):
+        for i in iters.irange(len(new_wavelet_transformed_image_significant_mask)):
             wavelet_denoising.recorders.array_debug_recorder["new_wavelet_transformed_image_significant_mask"] = \
                 new_wavelet_transformed_image_significant_mask[i][None]
 
@@ -2597,7 +2597,7 @@ def wavelet_denoising(new_image,
     # For holding the label image properties
     new_wavelet_image_denoised_labeled_props = region_properties(
         new_wavelet_image_denoised_labeled,
-        properties=accepted_region_shape_constraints.keys()
+        properties=list(accepted_region_shape_constraints.keys())
     )
 
     logger.debug("Determined the properties of the label image.")
@@ -2756,7 +2756,7 @@ def wavelet_denoising(new_image,
     watershed_local_maxima = ExtendedRegionProps(
         local_maxima.intensity_image,
         new_wavelet_image_denoised_segmentation,
-        properties=["centroid"] + accepted_neuron_shape_constraints.keys()
+        properties=["centroid"] + list(accepted_neuron_shape_constraints.keys())
     )
 
     wavelet_denoising.recorders.array_debug_recorder["watershed_local_maxima_label_image"] = watershed_local_maxima.label_image[None]
@@ -2901,7 +2901,7 @@ def extract_neurons(new_image, neuron_masks):
     neurons["area"] = xnumpy.array_to_matrix(neurons["mask"]).sum(axis=1)
     neurons["max_F"] = xnumpy.array_to_matrix(neurons["image"]).max(axis=1)
 
-    for i in xrange(len(neurons)):
+    for i in iters.irange(len(neurons)):
         neuron_mask_i_points = numpy.array(neurons["mask"][i].nonzero())
 
         neurons["contour"][i] = xnumpy.generate_contour_fast(
@@ -3224,7 +3224,7 @@ def merge_neuron_sets_once(new_neuron_set_1,
 
     # Fuse all the neurons that can be from new_neuron_set_2 to the
     # new_neuron_set (composed of new_neuron_set_1)
-    for i, j in itertools.izip(
+    for i, j in iters.izip(
             new_neuron_set_all_optimal_i, new_neuron_set_all_j_fuse
     ):
         #fuse_neurons.recorders.array_debug_recorder = hdf5.record.HDF5EnumeratedArrayRecorder(
@@ -3527,7 +3527,7 @@ def merge_neuron_sets_repeatedly(new_neuron_set_1,
 
     # Fuse all the neurons that can be from new_neuron_set_2 to the
     # new_neuron_set (composed of new_neuron_set_1)
-    for i, j in itertools.izip(
+    for i, j in iters.izip(
             new_neuron_set_all_optimal_i, new_neuron_set_all_j_fuse
     ):
         #fuse_neurons.recorders.array_debug_recorder = hdf5.record.HDF5EnumeratedArrayRecorder(
@@ -3574,7 +3574,7 @@ def expand_rois(new_data, roi_masks, **parameters):
 
     # Compute the area of each ROI in order to
     # properly compute the average activity of each ROI.
-    roi_areas = roi_masks.sum(axis=tuple(xrange(1, roi_masks.ndim)))
+    roi_areas = roi_masks.sum(axis=tuple(iters.irange(1, roi_masks.ndim)))
 
     # Add fake dimensions so that both arrays have dimensions number of ROIs
     # and then tyx or tzyx.
@@ -3594,7 +3594,7 @@ def expand_rois(new_data, roi_masks, **parameters):
     # the ROI
     time_traces = new_data_normalized_expanded * roi_masks_expanded
     time_traces = time_traces.sum(
-        axis=tuple(xrange(2, new_data_normalized_expanded.ndim))
+        axis=tuple(iters.irange(2, new_data_normalized_expanded.ndim))
     )
     time_traces /= roi_areas_expanded
diff --git a/nanshe/io/__init__.py b/nanshe/io/__init__.py
index 3bbd709f..bfeb8921 100644
--- a/nanshe/io/__init__.py
+++ b/nanshe/io/__init__.py
@@ -15,6 +15,6 @@
 __all__ = ["hdf5", "xjson", "xtiff"]
 
-import hdf5
-import xjson
-import xtiff
+from nanshe.io import hdf5
+from nanshe.io import xjson
+from nanshe.io import xtiff
diff --git a/nanshe/io/hdf5/__init__.py b/nanshe/io/hdf5/__init__.py
index a2be2ee9..605aa9da 100644
--- a/nanshe/io/hdf5/__init__.py
+++ b/nanshe/io/hdf5/__init__.py
@@ -18,6 +18,6 @@
 __all__ = ["record", "search", "serializers"]
 
-import record
-import search
-import serializers
+from nanshe.io.hdf5 import record
+from nanshe.io.hdf5 import search
+from nanshe.io.hdf5 import serializers
diff --git a/nanshe/io/hdf5/record.py b/nanshe/io/hdf5/record.py
index dd578981..f9af4d49 100644
--- a/nanshe/io/hdf5/record.py
+++ b/nanshe/io/hdf5/record.py
@@ -45,7 +45,7 @@ def b(x, y):
 import numpy
 import h5py
 
-import serializers
+from nanshe.io.hdf5 import serializers
 
 from nanshe.util import wrappers
diff --git a/nanshe/io/hdf5/search.py b/nanshe/io/hdf5/search.py
index c1b22e62..f176c423 100644
--- a/nanshe/io/hdf5/search.py
+++ b/nanshe/io/hdf5/search.py
@@ -27,6 +27,8 @@
 
 import h5py
 
+from nanshe.util import iters
+
 # Need in order to have logging information no matter what.
 from nanshe.util import prof
 
@@ -153,7 +155,7 @@ def get_matching_paths_groups_recursive(a_filehandle, a_path_pattern):
     groups = get_matching_paths_groups_recursive(a_filehandle, a_path_pattern)
 
     new_groups = []
-    for i in xrange(len(groups)):
+    for i in iters.irange(len(groups)):
         new_groups.append(list(groups[i]))
     groups = new_groups
 
@@ -188,7 +190,7 @@ def get_matching_grouped_paths(a_filehandle, a_path_pattern):
 
             paths_found[each_path] = None
 
-    paths_found = paths_found.keys()
+    paths_found = list(paths_found.keys())
 
     return(paths_found)
diff --git a/nanshe/io/hdf5/serializers.py b/nanshe/io/hdf5/serializers.py
index d09396e9..027e438c 100644
--- a/nanshe/io/hdf5/serializers.py
+++ b/nanshe/io/hdf5/serializers.py
@@ -30,6 +30,12 @@
 from nanshe.util import prof
 
 
+try:
+    unicode
+except NameError:
+    unicode = str
+
+
 # Get the logger
 trace_logger = prof.getTraceLogger(__name__)
diff --git a/nanshe/io/xjson.py b/nanshe/io/xjson.py
index 43ce2bbe..1bf33421 100644
--- a/nanshe/io/xjson.py
+++ b/nanshe/io/xjson.py
@@ -26,6 +26,12 @@
 from nanshe.util import prof
 
 
+try:
+    unicode
+except NameError:
+    unicode = str
+
+
 # Get the logger
 trace_logger = prof.getTraceLogger(__name__)
 logger = prof.logging.getLogger(__name__)
@@ -76,7 +82,9 @@ def ascii_encode_str(value, json_dict=json_dict):
         """
         new_value = None
-        if not value.startswith("__comment__"):
+        if unicode == str and not value.startswith(u"__comment__"):
+            new_value = value
+        elif not value.startswith("__comment__"):
             new_value = value.encode("utf-8")
 
         return(new_value)
@@ -101,8 +109,7 @@ def ascii_encode_list(data, json_dict=json_dict):
                 new_each_value = ascii_encode_dict(new_each_value)
             elif isinstance(new_each_value, list):
                 new_each_value = ascii_encode_list(new_each_value)
-            elif isinstance(new_each_value, unicode) or \
-                    isinstance(new_each_value, str):
+            elif isinstance(new_each_value, (bytes, unicode)):
                 new_each_value = ascii_encode_str(new_each_value)
 
             if new_each_value is not None:
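The io modules above all guard against the missing `unicode` builtin on Python 3 with the same try/except alias. A minimal standalone sketch of the pattern and how the aliased name behaves, not nanshe code:

    try:                    # Python 2: the builtin exists, keep it.
        unicode
    except NameError:       # Python 3: alias the closest equivalent.
        unicode = str

    # After the alias, isinstance checks read the same on both versions:
    assert isinstance(u"text", unicode)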
diff --git a/nanshe/io/xtiff.py b/nanshe/io/xtiff.py
index bf95f993..97d1a0e9 100644
--- a/nanshe/io/xtiff.py
+++ b/nanshe/io/xtiff.py
@@ -43,6 +43,11 @@
 from skimage.external import tifffile
 
+try:
+    unicode
+except NameError:
+    unicode = str
+
 
 # Get the logger
 trace_logger = prof.getTraceLogger(__name__)
@@ -105,9 +110,9 @@ def get_multipage_tiff_shape_dtype_transformed(new_tiff_filename,
     assert (len(axis_order) == 5)
     assert all([_ in axis_order for _ in "zyxtc"])
 
-    new_tiff_file_shape, new_tiff_file_dtype = get_multipage_tiff_shape_dtype(
+    new_tiff_file_shape, new_tiff_file_dtype = list(get_multipage_tiff_shape_dtype(
         new_tiff_filename
-    ).values()
+    ).values())
 
     # Correct if the tiff is missing dims by adding singletons
     if (len(new_tiff_file_shape) == 5):
@@ -190,7 +195,7 @@ def get_standard_tiff_array(new_tiff_filename,
 
     # Fit the old VIGRA style array. (may try to remove in the future)
     new_tiff_array = new_tiff_array.transpose(
-        tuple(xrange(new_tiff_array.ndim - 1, 1, -1)) + (1, 0)
+        tuple(iters.irange(new_tiff_array.ndim - 1, 1, -1)) + (1, 0)
     )
 
     # Check to make sure the dimensions are ok
@@ -264,13 +269,13 @@ def get_standard_tiff_data(new_tiff_filename,
     with tifffile.TiffFile(new_tiff_filename) as new_tiff_file:
         new_tiff_array = new_tiff_file.asarray(memmap=memmap)
 
-        for i in xrange(
+        for i in iters.irange(
                 0,
                 len(new_tiff_file),
                 pages_to_channel
         ):
             new_tiff_description.append([])
-            for j in xrange(pages_to_channel):
+            for j in iters.irange(pages_to_channel):
                 each_page = new_tiff_file[i + j]
                 each_metadata = each_page.tags
                 each_desc = u""
@@ -295,7 +300,7 @@ def get_standard_tiff_data(new_tiff_filename,
 
     # Fit the old VIGRA style array. (may try to remove in the future)
     new_tiff_array = new_tiff_array.transpose(
-        tuple(xrange(new_tiff_array.ndim - 1, 1, -1)) + (1, 0)
+        tuple(iters.irange(new_tiff_array.ndim - 1, 1, -1)) + (1, 0)
     )
 
     # Check to make sure the dimensions are ok
@@ -379,10 +384,12 @@ def convert_tiffs(new_tiff_filenames,
     assert (pages_to_channel > 0)
 
     # Get the axes that do not change
-    static_axes = numpy.array(list(iters.xrange_with_skip(3, to_skip=axis)))
+    static_axes = numpy.array(list(iters.xrange_with_skip(
+        3, to_skip=axis
+    )))
 
     # if it is only a single str, make it a singleton list
-    if isinstance(new_tiff_filenames, str):
+    if isinstance(new_tiff_filenames, (bytes, unicode)):
         new_tiff_filenames = [new_tiff_filenames]
 
     # Expand any regex in path names
@@ -398,17 +405,17 @@ def convert_tiffs(new_tiff_filenames,
     new_hdf5_dataset_dtype = bool
     for each_new_tiff_filename in new_tiff_filenames:
         # Add each filename.
-        new_hdf5_dataset_filenames.append(each_new_tiff_filename)
+        new_hdf5_dataset_filenames.append(unicode(each_new_tiff_filename))
 
         # Get all of the offsets.
         new_hdf5_dataset_offsets.append(new_hdf5_dataset_shape[axis])
 
         # Get the shape and type of each frame.
-        each_new_tiff_file_shape, each_new_tiff_file_dtype = get_multipage_tiff_shape_dtype_transformed(
+        each_new_tiff_file_shape, each_new_tiff_file_dtype = list(get_multipage_tiff_shape_dtype_transformed(
             each_new_tiff_filename,
             axis_order="cztyx",
             pages_to_channel=pages_to_channel
-        ).values()
+        ).values())
         each_new_tiff_file_shape = each_new_tiff_file_shape[2:]
 
         # Find the increase on the merge axis. Find the largest shape for the
@@ -459,7 +466,12 @@ def convert_tiffs(new_tiff_filenames,
         new_hdf5_dataset_dtype,
         chunks=True
     )
-    new_hdf5_dataset.attrs["filenames"] = new_hdf5_dataset_filenames
+    new_hdf5_dataset.attrs.create(
+        "filenames",
+        new_hdf5_dataset_filenames,
+        shape=new_hdf5_dataset_filenames.shape,
+        dtype=h5py.special_dtype(vlen=unicode)
+    )
     new_hdf5_dataset.attrs["offsets"] = new_hdf5_dataset_offsets
     # Workaround required due to this issue
     # ( https://github.com/h5py/h5py/issues/289 ).
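The `attrs.create(...)` change above sidesteps h5py's trouble storing a collection of variable-length Unicode strings through plain attribute assignment (see the h5py issue referenced in the hunk). A minimal standalone sketch of the same workaround on Python 3, where `unicode` is aliased to `str`; the file and attribute names here are illustrative only:

    import h5py
    import numpy

    filenames = numpy.array(["a.tif", "b.tif"], dtype=object)
    with h5py.File("example.h5", "w") as fh:
        dset = fh.create_dataset("data", shape=(2, 4), dtype=float)
        # Build the attribute explicitly with a variable-length string
        # dtype instead of relying on plain assignment.
        dset.attrs.create(
            "filenames",
            filenames,
            shape=filenames.shape,
            dtype=h5py.special_dtype(vlen=str)
        )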
diff --git a/nanshe/learner.py b/nanshe/learner.py
index 7b112b7c..03f8200c 100644
--- a/nanshe/learner.py
+++ b/nanshe/learner.py
@@ -486,7 +486,7 @@ def generate_neurons_blocks(input_filename,
     # Overwrite the config file always
     with open(intermediate_config, "w") as fid:
         json.dump(
-            dict(parameters.items() + {"debug" : debug}.items()),
+            dict(list(parameters.items()) + list({"debug" : debug}.items())),
             fid,
             indent=4,
             separators=(",", " : ")
@@ -596,7 +596,7 @@ def generate_neurons_blocks(input_filename,
         executable_run += "from %s import main; exit(main(*argv))" % \
             (cur_module_name,)
 
-        block_process_args_gen = itertools.izip(
+        block_process_args_gen = iters.izip(
             itertools.repeat(python),
             itertools.repeat("-c"),
             itertools.repeat(executable_run),
@@ -751,7 +751,7 @@ def generate_neurons_blocks(input_filename,
     )
 
     for i, i_str, (output_filename_block_i, sequential_block_i) in iters.filled_stringify_enumerate(
-            itertools.izip(output_filename_block, original_images_pared_slices.flat)):
+            iters.izip(output_filename_block, original_images_pared_slices.flat)):
         windowed_slice_i = tuple(
             slice(_1, _2, 1) for _1, _2 in [(None, None)] + sequential_block_i["windowed_stack_selection"].tolist()[1:]
         )
@@ -770,7 +770,7 @@ def generate_neurons_blocks(input_filename,
             numpy.apply_over_axes(
                 numpy.sum,
                 neurons_block_i_smaller["mask"].astype(float),
-                tuple(xrange(1, neurons_block_i_smaller["mask"].ndim))
+                tuple(iters.irange(1, neurons_block_i_smaller["mask"].ndim))
             )
         )
 
@@ -782,7 +782,7 @@ def generate_neurons_blocks(input_filename,
             numpy.apply_over_axes(
                 numpy.sum,
                 neurons_block_i_smaller["mask"][window_trimmed_i].astype(float),
-                tuple(xrange(1, neurons_block_i_smaller["mask"].ndim))
+                tuple(iters.irange(1, neurons_block_i_smaller["mask"].ndim))
             )
         )
diff --git a/nanshe/misc/__init__.py b/nanshe/misc/__init__.py
index d4c07abe..48ac07fd 100644
--- a/nanshe/misc/__init__.py
+++ b/nanshe/misc/__init__.py
@@ -19,5 +19,5 @@
     "random_dictionary_learning_data"
 ]
 
-# import neuron_matplotlib_viewer
-import random_dictionary_learning_data
+# from nanshe.misc import neuron_matplotlib_viewer
+from nanshe.misc import random_dictionary_learning_data
diff --git a/nanshe/misc/random_dictionary_learning_data.py b/nanshe/misc/random_dictionary_learning_data.py
index 08467d40..5607e322 100644
--- a/nanshe/misc/random_dictionary_learning_data.py
+++ b/nanshe/misc/random_dictionary_learning_data.py
@@ -78,7 +78,7 @@ def __call__(self, size=1):
             results = [self.args[_] for _ in indices]
         else:
-            results = [None for _ in xrange(size)]
+            results = [None for _ in nanshe.util.iters.irange(size)]
 
         return(results)
@@ -169,7 +169,9 @@ def __call__(self, p, size=1):
         group_sizes = numpy.random.geometric(p, size)
 
         # Using the sizes draw element to fill groups up to the right size
-        results = [uni_gen(group_sizes[i]) for i in xrange(size)]
+        results = [
+            uni_gen(group_sizes[i]) for i in nanshe.util.iters.irange(size)
+        ]
 
         return(results)
@@ -282,7 +284,7 @@ def __call__(self, num_runs=1, seed=None):
         # A list of DictionaryLearningRandomDataSample instances
         results = []
-        for i in xrange(num_runs):
+        for i in nanshe.util.iters.irange(num_runs):
             # Where the result will be stored
             each_result = DictionaryLearningRandomDataSample()
@@ -355,7 +357,7 @@ def __call__(self, num_runs=1, seed=None):
             # Determines how much to spread each active point
             # (self.object_spread is like the average spread)
             sigma = 2 * self.object_spread * numpy.random.random()
-            for each_frame_num in xrange(self.num_frames):
+            for each_frame_num in nanshe.util.iters.irange(self.num_frames):
                 # Determines a linear rescaling of each image (where they
                 # slowly become dimmer)
                 rescale = float(
diff --git a/nanshe/registerer.py b/nanshe/registerer.py
index 8f7c9215..f31a026f 100644
--- a/nanshe/registerer.py
+++ b/nanshe/registerer.py
@@ -26,7 +26,7 @@
 import h5py
 
-from nanshe.util import prof
+from nanshe.util import iters, prof
 from nanshe.io import xjson
 from nanshe.util.pathHelpers import PathComponents
 from nanshe.imp import registration
@@ -106,7 +106,7 @@ def main(*argv):
             PathComponents(each_output_filename)
         )
 
-    for each_input_filename_components, each_output_filename_components in itertools.izip(
+    for each_input_filename_components, each_output_filename_components in iters.izip(
             parsed_args.input_file_components, parsed_args.output_file_components):
         with h5py.File(each_input_filename_components.externalPath, "r") as input_file:
             with h5py.File(each_output_filename_components.externalPath, "a") as output_file:
diff --git a/nanshe/syn/__init__.py b/nanshe/syn/__init__.py
index 9073538c..e111deb2 100644
--- a/nanshe/syn/__init__.py
+++ b/nanshe/syn/__init__.py
@@ -17,4 +17,4 @@
 __all__ = ["data"]
 
-import data
+from nanshe.syn import data
diff --git a/nanshe/syn/data.py b/nanshe/syn/data.py
index e40d2d6c..88adb9c2 100644
--- a/nanshe/syn/data.py
+++ b/nanshe/syn/data.py
@@ -18,14 +18,13 @@
 __date__ = "$Aug 01, 2014 14:55:57 EDT$"
 
 
-import itertools
-
 import numpy
 
 import scipy
 import scipy.ndimage
 import scipy.ndimage.filters
 
+import nanshe.util.iters
 import nanshe.util.xnumpy
@@ -380,7 +379,7 @@ def generate_gaussian_images(space, means, std_devs, magnitudes):
         magnitudes.shape + tuple(space.tolist()), dtype=float
     )
     for i, (each_mean, each_std_dev, each_magnitude) in enumerate(
-            itertools.izip(means, std_devs, magnitudes)
+            nanshe.util.iters.izip(means, std_devs, magnitudes)
     ):
         images[i][tuple(each_mean)] = each_magnitude
         images[i] = scipy.ndimage.filters.gaussian_filter(
@@ -458,8 +457,8 @@ def generate_random_bound_points(space, radii):
 
     # Generate a random point for each radius.
     points = numpy.zeros(radii.shape + space.shape, dtype=int)
-    for i in xrange(len(radii)):
-        for j in xrange(len(space)):
+    for i in nanshe.util.iters.irange(len(radii)):
+        for j in nanshe.util.iters.irange(len(space)):
             points[i][j] = numpy.random.randint(
                 bound_space[i][j][0], bound_space[i][j][1]
             )
diff --git a/nanshe/util/__init__.py b/nanshe/util/__init__.py
index ef38b498..1f3be8d1 100644
--- a/nanshe/util/__init__.py
+++ b/nanshe/util/__init__.py
@@ -18,9 +18,9 @@
     "iters", "pathHelpers", "prof", "wrappers", "xglob", "xnumpy"
 ]
 
-import iters
-import pathHelpers
-import prof
-import wrappers
-import xglob
-import xnumpy
+from nanshe.util import iters
+from nanshe.util import pathHelpers
+from nanshe.util import prof
+from nanshe.util import wrappers
+from nanshe.util import xglob
+from nanshe.util import xnumpy
diff --git a/nanshe/util/iters.py b/nanshe/util/iters.py
index 2f767980..4948abf2 100644
--- a/nanshe/util/iters.py
+++ b/nanshe/util/iters.py
@@ -24,7 +24,23 @@
 import numpy
 
 # Need in order to have logging information no matter what.
-import prof
+from nanshe.util import prof
+
+
+try:
+    from itertools import izip_longest
+except ImportError:
+    from itertools import zip_longest as izip_longest
+
+try:
+    from itertools import izip
+except ImportError:
+    izip = zip
+
+try:
+    irange = xrange
+except NameError:
+    irange = range
 
 
 # Get the logger
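Everything below leans on the three aliases introduced in the hunk above: `izip_longest`, `izip`, and `irange` resolve to the itertools/builtin names on Python 2 and to their lazy Python 3 equivalents otherwise. A short standalone sketch of how the aliased names behave, not nanshe code:

    try:
        from itertools import izip   # Python 2
    except ImportError:
        izip = zip                   # Python 3: zip is already lazy

    try:
        irange = xrange              # Python 2
    except NameError:
        irange = range               # Python 3: range is already lazy

    # Both names now behave identically on either interpreter:
    print(list(izip(irange(3), "abc")))   # [(0, 'a'), (1, 'b'), (2, 'c')]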
@@ -68,9 +84,9 @@ def index_generator(*sizes):
         [(0, 0), (0, 1), (1, 0), (1, 1), (2, 0), (2, 1)]
     """
 
-    # Creates a list of xrange generator objects over each respective
+    # Creates a list of irange generator objects over each respective
     # dimension of sizes
-    gens = [xrange(_) for _ in sizes]
+    gens = [irange(_) for _ in sizes]
 
     # Combines the generators to a single generator of indices that go
     # throughout sizes
@@ -118,9 +134,9 @@ def index_enumerator(*sizes):
         [(0, (0, 0)), (1, (0, 1)), (2, (1, 0)), (3, (1, 1)), (4, (2, 0)), (5, (2, 1))]
     """
 
-    # Creates a list of xrange generator objects over each respective
+    # Creates a list of irange generator objects over each respective
     # dimension of sizes
-    gens = [xrange(_) for _ in sizes]
+    gens = [irange(_) for _ in sizes]
 
     # Combines the generators to a single generator of indices that go
     # throughout sizes
@@ -155,7 +171,7 @@ def list_indices_to_index_array(list_indices):
 
     # Combines the indices so that one dimension is represented by each list.
     # Then converts this to a tuple numpy.ndarrays.
-    return(tuple(numpy.array(zip(*list_indices))))
+    return(tuple(numpy.array(list(izip(*list_indices)))))
 
 
 @prof.log_call(trace_logger)
@@ -233,22 +249,22 @@ def repeat_generator(a_iter, n=1):
         from the given iterator.
 
     Examples:
-        >>> repeat_generator(xrange(5)) #doctest: +ELLIPSIS
+        >>> repeat_generator(irange(5)) #doctest: +ELLIPSIS
         <generator object repeat_generator at 0x...>
 
-        >>> list(repeat_generator(xrange(0)))
+        >>> list(repeat_generator(irange(0)))
         []
 
-        >>> list(repeat_generator(xrange(5), 0))
+        >>> list(repeat_generator(irange(5), 0))
         []
 
-        >>> list(repeat_generator(xrange(5), 1))
+        >>> list(repeat_generator(irange(5), 1))
         [0, 1, 2, 3, 4]
 
-        >>> list(repeat_generator(xrange(5), 2))
+        >>> list(repeat_generator(irange(5), 2))
         [0, 0, 1, 1, 2, 2, 3, 3, 4, 4]
 
-        >>> list(repeat_generator(xrange(5), 3))
+        >>> list(repeat_generator(irange(5), 3))
         [0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 4]
     """
@@ -256,7 +272,7 @@ def repeat_generator(a_iter, n=1):
     assert ((n % 1) == 0), "n must be an integer, but got n = " + repr(n)
 
     for each_value in a_iter:
-        for i in xrange(n):
+        for i in irange(n):
             yield(each_value)
@@ -274,22 +290,22 @@ def cycle_generator(a_iter, n=1):
         a certain number of times.
 
     Examples:
-        >>> cycle_generator(xrange(5)) #doctest: +ELLIPSIS
+        >>> cycle_generator(irange(5)) #doctest: +ELLIPSIS
         <generator object cycle_generator at 0x...>
 
-        >>> list(cycle_generator(xrange(0)))
+        >>> list(cycle_generator(irange(0)))
         []
 
-        >>> list(cycle_generator(xrange(5), 0))
+        >>> list(cycle_generator(irange(5), 0))
         []
 
-        >>> list(cycle_generator(xrange(5), 1))
+        >>> list(cycle_generator(irange(5), 1))
         [0, 1, 2, 3, 4]
 
-        >>> list(cycle_generator(xrange(5), 2))
+        >>> list(cycle_generator(irange(5), 2))
         [0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
 
-        >>> list(cycle_generator(xrange(5), 3))
+        >>> list(cycle_generator(irange(5), 3))
         [0, 1, 2, 3, 4, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
     """
@@ -320,28 +336,28 @@ def iter_with_skip_indices(a_iter, to_skip=None):
         indices in to_skip.
 
     Examples:
-        >>> iter_with_skip_indices(xrange(10)) #doctest: +ELLIPSIS
+        >>> iter_with_skip_indices(irange(10)) #doctest: +ELLIPSIS
         <generator object iter_with_skip_indices at 0x...>
 
-        >>> list(iter_with_skip_indices(xrange(10)))
+        >>> list(iter_with_skip_indices(irange(10)))
         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
 
-        >>> list(iter_with_skip_indices(xrange(10), to_skip = 2))
+        >>> list(iter_with_skip_indices(irange(10), to_skip = 2))
         [0, 1, 3, 4, 5, 6, 7, 8, 9]
 
-        >>> list(iter_with_skip_indices(xrange(1, 10), to_skip = 2))
+        >>> list(iter_with_skip_indices(irange(1, 10), to_skip = 2))
         [1, 2, 4, 5, 6, 7, 8, 9]
 
-        >>> list(iter_with_skip_indices(xrange(10), to_skip = [2, 7]))
+        >>> list(iter_with_skip_indices(irange(10), to_skip = [2, 7]))
         [0, 1, 3, 4, 5, 6, 8, 9]
 
-        >>> list(iter_with_skip_indices(xrange(10), to_skip = [0]))
+        >>> list(iter_with_skip_indices(irange(10), to_skip = [0]))
         [1, 2, 3, 4, 5, 6, 7, 8, 9]
 
-        >>> list(iter_with_skip_indices(xrange(1, 10), to_skip = [0]))
+        >>> list(iter_with_skip_indices(irange(1, 10), to_skip = [0]))
         [2, 3, 4, 5, 6, 7, 8, 9]
 
-        >>> list(iter_with_skip_indices(xrange(10), to_skip = [9]))
+        >>> list(iter_with_skip_indices(irange(10), to_skip = [9]))
         [0, 1, 2, 3, 4, 5, 6, 7, 8]
     """
@@ -386,28 +402,28 @@ def iter_with_skip_values(a_iter, to_skip=None):
 
         >>> iter_with_skip_values(10) #doctest: +ELLIPSIS
         <generator object iter_with_skip_values at 0x...>
 
-        >>> list(iter_with_skip_values(xrange(10)))
+        >>> list(iter_with_skip_values(irange(10)))
         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
 
-        >>> list(iter_with_skip_values(xrange(10)))
+        >>> list(iter_with_skip_values(irange(10)))
         [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
 
-        >>> list(iter_with_skip_values(xrange(1, 10)))
+        >>> list(iter_with_skip_values(irange(1, 10)))
         [1, 2, 3, 4, 5, 6, 7, 8, 9]
 
-        >>> list(iter_with_skip_values(xrange(0, 10, 2)))
+        >>> list(iter_with_skip_values(irange(0, 10, 2)))
         [0, 2, 4, 6, 8]
 
-        >>> list(iter_with_skip_values(xrange(1, 10, 2)))
+        >>> list(iter_with_skip_values(irange(1, 10, 2)))
         [1, 3, 5, 7, 9]
 
-        >>> list(iter_with_skip_values(xrange(10), to_skip = 2))
+        >>> list(iter_with_skip_values(irange(10), to_skip = 2))
         [0, 1, 3, 4, 5, 6, 7, 8, 9]
 
-        >>> list(iter_with_skip_values(xrange(1, 10), to_skip = 2))
+        >>> list(iter_with_skip_values(irange(1, 10), to_skip = 2))
         [1, 3, 4, 5, 6, 7, 8, 9]
 
-        >>> list(iter_with_skip_values(xrange(0, 10, 2), to_skip = [2,6]))
+        >>> list(iter_with_skip_values(irange(0, 10, 2), to_skip = [2,6]))
         [0, 4, 8]
     """
@@ -429,23 +445,23 @@ def iter_with_skip_values(a_iter, to_skip=None):
 
 @prof.log_call(trace_logger)
 def xrange_with_skip(start, stop=None, step=None, to_skip=None):
     """
-        Behaves as xrange does except allows for skipping arbitrary values, as
+        Behaves as irange does except allows for skipping arbitrary values, as
         well. These values to be skipped should be specified using some
         iterable.
 
         Args:
-            start(int):          start for xrange or if stop is not specified
+            start(int):          start for irange or if stop is not specified
                                  this will be stop.
 
-            stop(int):           stop for xrange.
+            stop(int):           stop for irange.
 
-            stop(int):           step for xrange.
+            step(int):           step for irange.
 
            to_skip(iter):       some form of iterable or list of elements to
                                 skip (can be a single value as well).
 
        Returns:
-            (generator object):  an xrange-like generator that skips some
+            (generator object):  an irange-like generator that skips some
                                  values.
 
        Examples:
@@ -486,11 +502,11 @@ def xrange_with_skip(start, stop=None, step=None, to_skip=None):
 
     full = None
 
     if (stop is None):
-        full = iter(xrange(start))
+        full = iter(irange(start))
     elif (step is None):
-        full = iter(xrange(start, stop))
+        full = iter(irange(start, stop))
     else:
-        full = iter(xrange(start, stop, step))
+        full = iter(irange(start, stop, step))
 
     if to_skip is None:
         to_skip = iter([])
@@ -511,7 +527,7 @@ def xrange_with_skip(start, stop=None, step=None, to_skip=None):
 
 def splitting_xrange(a, b=None):
     """
-        Similar to xrange except that it recursively proceeds through the given
+        Similar to irange except that it recursively proceeds through the given
         range in such a way that values that follow each other are preferably
        not only non-sequential, but fairly different. This does not always
        work with small ranges, but works nicely with large ranges.
@@ -563,7 +579,7 @@ def splitting_xrange_helper(a, b):
 
             if a < mid_1 and b > mid_2:
                 yield(mid_2)
 
-            for _1, _2 in itertools.izip(
+            for _1, _2 in izip(
                     splitting_xrange_helper(a, mid_1),
                     splitting_xrange_helper(mid_2, b)
             ):
@@ -611,32 +627,29 @@ def subrange(start, stop=None, step=None, substep=None):
 
         substep(int):        Step within each range
 
     Yields:
-        (xrange):            A subrange within the larger range.
+        (irange):            A subrange within the larger range.
 
     Examples:
         >>> subrange(5) # doctest: +ELLIPSIS
         <generator object subrange at 0x...>
 
-        >>> list(subrange(5))
-        [xrange(1), xrange(1, 2), xrange(2, 3), xrange(3, 4), xrange(4, 5)]
+        >>> list(map(list, subrange(5)))
+        [[0], [1], [2], [3], [4]]
 
-        >>> list(subrange(0, 5))
-        [xrange(1), xrange(1, 2), xrange(2, 3), xrange(3, 4), xrange(4, 5)]
+        >>> list(map(list, subrange(0, 5)))
+        [[0], [1], [2], [3], [4]]
 
-        >>> list(subrange(1, 5))
-        [xrange(1, 2), xrange(2, 3), xrange(3, 4), xrange(4, 5)]
+        >>> list(map(list, subrange(1, 5)))
+        [[1], [2], [3], [4]]
 
-        >>> list(subrange(0, 10, 3))
-        [xrange(3), xrange(3, 6), xrange(6, 9), xrange(9, 10)]
+        >>> list(map(list, subrange(0, 10, 3)))
+        [[0, 1, 2], [3, 4, 5], [6, 7, 8], [9]]
 
-        >>> list(subrange(0, 7, 3))
-        [xrange(3), xrange(3, 6), xrange(6, 7)]
+        >>> list(map(list, subrange(0, 7, 3)))
+        [[0, 1, 2], [3, 4, 5], [6]]
 
-        >>> [xrange(0, 3, 2), xrange(3, 6, 2), xrange(6, 7, 2)]
-        [xrange(0, 4, 2), xrange(3, 7, 2), xrange(6, 8, 2)]
-
-        >>> list(subrange(0, 7, 3, 2))
-        [xrange(0, 4, 2), xrange(3, 7, 2), xrange(6, 8, 2)]
+        >>> list(map(list, subrange(0, 7, 3, 2)))
+        [[0, 2], [3, 5], [6]]
     """
 
     if stop is None:
@@ -649,10 +662,10 @@ def subrange(start, stop=None, step=None, substep=None):
     if substep is None:
         substep = 1
 
-    range_ends = itertools.chain(xrange(start, stop, step), [stop])
+    range_ends = itertools.chain(irange(start, stop, step), [stop])
 
     for i, j in lagged_generators_zipped(range_ends):
-        yield(xrange(i, j, substep))
+        yield(irange(i, j, substep))
 
 
 @prof.log_call(trace_logger)
@@ -661,7 +674,7 @@ def cumulative_generator(new_op, new_iter):
     Takes each value from new_iter and applies new_op to it with the result
     of previous values.
 
-    For instance cumulative_generator(op.mul, xrange(1,5)) will return all
+    For instance cumulative_generator(op.mul, irange(1,5)) will return all
     factorials up to and including the factorial of 4 (24).
 
     Args:
@@ -680,16 +693,16 @@ def cumulative_generator(new_op, new_iter):
 
         >>> cumulative_generator(operator.add, 10) #doctest: +ELLIPSIS
         <generator object cumulative_generator at 0x...>
 
-        >>> list(cumulative_generator(operator.add, xrange(1,5)))
+        >>> list(cumulative_generator(operator.add, irange(1,5)))
         [1, 3, 6, 10]
 
-        >>> list(cumulative_generator(operator.add, xrange(5)))
+        >>> list(cumulative_generator(operator.add, irange(5)))
         [0, 1, 3, 6, 10]
 
-        >>> list(cumulative_generator(operator.mul, xrange(5)))
+        >>> list(cumulative_generator(operator.mul, irange(5)))
         [0, 0, 0, 0, 0]
 
-        >>> list(cumulative_generator(operator.mul, xrange(1,5)))
+        >>> list(cumulative_generator(operator.mul, irange(1,5)))
         [1, 2, 6, 24]
     """
@@ -717,11 +730,11 @@ def reverse_each_element(new_iter):
 
     Examples:
         >>> reverse_each_element(
-        ...     zip(xrange(5, 11), xrange(5))
+        ...     zip(irange(5, 11), irange(5))
         ... ) #doctest: +ELLIPSIS
         <generator object reverse_each_element at 0x...>
 
-        >>> list(reverse_each_element(zip(xrange(5, 11), xrange(5))))
+        >>> list(reverse_each_element(zip(irange(5, 11), irange(5))))
         [(0, 5), (1, 6), (2, 7), (3, 8), (4, 9)]
 
         >>> list(reverse_each_element(iter([[5]])))
@@ -755,25 +768,25 @@ def lagged_generators(new_iter, n=2):
         step in front of the others.
 
     Examples:
-        >>> lagged_generators(xrange(5), 1) #doctest: +ELLIPSIS
-        (<itertools.tee object at 0x...>,)
+        >>> lagged_generators(irange(5), 1) #doctest: +ELLIPSIS
+        (<itertools... object at 0x...>,)
 
-        >>> zip(*lagged_generators(xrange(5), 1))
+        >>> list(izip(*lagged_generators(irange(5), 1)))
         [(0,), (1,), (2,), (3,), (4,)]
 
-        >>> zip(*lagged_generators(xrange(5), 2))
+        >>> list(izip(*lagged_generators(irange(5), 2)))
         [(0, 1), (1, 2), (2, 3), (3, 4)]
 
-        >>> zip(*lagged_generators(xrange(5)))
+        >>> list(izip(*lagged_generators(irange(5))))
         [(0, 1), (1, 2), (2, 3), (3, 4)]
 
-        >>> zip(*lagged_generators(xrange(5), 3))
+        >>> list(izip(*lagged_generators(irange(5), 3)))
         [(0, 1, 2), (1, 2, 3), (2, 3, 4)]
 
-        >>> list(itertools.izip_longest(*lagged_generators(xrange(5))))
+        >>> list(izip_longest(*lagged_generators(irange(5))))
         [(0, 1), (1, 2), (2, 3), (3, 4), (4, None)]
 
-        >>> list(itertools.izip_longest(*lagged_generators(xrange(5), 3)))
+        >>> list(izip_longest(*lagged_generators(irange(5), 3)))
         [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, None), (4, None, None)]
     """
@@ -788,7 +801,7 @@ def lagged_generators(new_iter, n=2):
     if n > 0:
         # Convert to the same type
         next_iter = itertools.tee(new_iter, 1)[0]
-        for i in xrange(1, n):
+        for i in irange(1, n):
             # Duplicate the iterator
             prev_iter, next_iter = itertools.tee(next_iter, 2)
@@ -832,25 +845,25 @@ def lagged_generators_zipped(new_iter, n=2, longest=False, fillvalue=None):
         values from each iterator.
 
     Examples:
-        >>> lagged_generators_zipped(xrange(5), 1) #doctest: +ELLIPSIS
-        <itertools.izip object at 0x...>
+        >>> isinstance(lagged_generators_zipped(irange(5), 1), izip)
+        True
 
-        >>> list(lagged_generators_zipped(xrange(5)))
+        >>> list(lagged_generators_zipped(irange(5)))
         [(0, 1), (1, 2), (2, 3), (3, 4)]
 
-        >>> list(lagged_generators_zipped(xrange(5), 1))
+        >>> list(lagged_generators_zipped(irange(5), 1))
         [(0,), (1,), (2,), (3,), (4,)]
 
-        >>> list(lagged_generators_zipped(xrange(5), 2))
+        >>> list(lagged_generators_zipped(irange(5), 2))
         [(0, 1), (1, 2), (2, 3), (3, 4)]
 
-        >>> list(lagged_generators_zipped(xrange(5), 3))
+        >>> list(lagged_generators_zipped(irange(5), 3))
         [(0, 1, 2), (1, 2, 3), (2, 3, 4)]
 
-        >>> list(lagged_generators_zipped(xrange(5), longest=True))
+        >>> list(lagged_generators_zipped(irange(5), longest=True))
         [(0, 1), (1, 2), (2, 3), (3, 4), (4, None)]
 
-        >>> list(lagged_generators_zipped(xrange(5), 3, True))
+        >>> list(lagged_generators_zipped(irange(5), 3, True))
         [(0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, None), (4, None, None)]
     """
@@ -858,9 +871,9 @@ def lagged_generators_zipped(new_iter, n=2, longest=False, fillvalue=None):
 
     zipped_iters = None
     if longest:
-        zipped_iters = itertools.izip_longest(*all_iters, fillvalue=fillvalue)
+        zipped_iters = izip_longest(*all_iters, fillvalue=fillvalue)
     else:
-        zipped_iters = itertools.izip(*all_iters)
+        zipped_iters = izip(*all_iters)
 
     return(zipped_iters)
@@ -884,7 +897,7 @@ def filled_stringify_numbers(new_iter, include_numbers=False):
         >>> list(filled_stringify_numbers([]))
         []
 
-        >>> list(filled_stringify_numbers(xrange(5)))
+        >>> list(filled_stringify_numbers(irange(5)))
         ['0', '1', '2', '3', '4']
 
         >>> list(filled_stringify_numbers([5]))
@@ -953,10 +966,10 @@ def filled_stringify_xrange(new_iter):
         >>> list(filled_stringify_xrange([]))
         []
 
-        >>> list(filled_stringify_xrange(xrange(5)))
+        >>> list(filled_stringify_xrange(irange(5)))
         [(0, '0'), (1, '1'), (2, '2'), (3, '3'), (4, '4')]
 
-        >>> list(filled_stringify_xrange(xrange(2, 5)))
+        >>> list(filled_stringify_xrange(irange(2, 5)))
         [(0, '0'), (1, '1'), (2, '2')]
 
         >>> list(filled_stringify_xrange([5]))
@@ -968,17 +981,17 @@ def filled_stringify_xrange(new_iter):
         >>> list(filled_stringify_xrange(iter([5, 7])))
         [(0, '0'), (1, '1')]
 
-        >>> list(filled_stringify_xrange(range(11)))
+        >>> list(filled_stringify_xrange(list(irange(11))))
         [(0, '00'), (1, '01'), (2, '02'), (3, '03'), (4, '04'), (5, '05'), (6, '06'), (7, '07'), (8, '08'), (9, '09'), (10, '10')]
     """
 
     new_list = new_iter
     new_list_index_gen = None
     try:
-        new_list_index_gen = xrange(len(new_list))
+        new_list_index_gen = irange(len(new_list))
     except TypeError:
         new_list = list(new_list)
-        new_list_index_gen = xrange(len(new_list))
+        new_list_index_gen = irange(len(new_list))
 
     new_list_index_gen_stringified = filled_stringify_numbers(
         new_list_index_gen, include_numbers=True
@@ -1006,10 +1019,10 @@ def filled_stringify_enumerate(new_iter):
         >>> list(filled_stringify_enumerate([]))
         []
 
-        >>> list(filled_stringify_enumerate(xrange(5)))
+        >>> list(filled_stringify_enumerate(irange(5)))
         [(0, '0', 0), (1, '1', 1), (2, '2', 2), (3, '3', 3), (4, '4', 4)]
 
-        >>> list(filled_stringify_enumerate(xrange(2, 5)))
+        >>> list(filled_stringify_enumerate(irange(2, 5)))
         [(0, '0', 2), (1, '1', 3), (2, '2', 4)]
 
         >>> list(filled_stringify_enumerate([5]))
@@ -1028,16 +1041,16 @@ def filled_stringify_enumerate(new_iter):
     new_list = new_iter
     new_list_index_gen = None
     try:
-        new_list_index_gen = xrange(len(new_list))
+        new_list_index_gen = irange(len(new_list))
     except TypeError:
         new_list = list(new_list)
-        new_list_index_gen = xrange(len(new_list))
+        new_list_index_gen = irange(len(new_list))
 
     new_list_index_gen_stringified = filled_stringify_numbers(
         new_list_index_gen, include_numbers=True
     )
 
-    for (i, i_str), each in itertools.izip(new_list_index_gen_stringified, new_list):
+    for (i, i_str), each in izip(new_list_index_gen_stringified, new_list):
         yield ((i, i_str, each))
@@ -1368,7 +1381,7 @@ def len_slices(slices, lengths=None):
         ...     slice(3, None),
         ...     slice(None, 5),
         ...     slice(None, None, 2)
-        ... ))
+        ... )) #doctest: +IGNORE_EXCEPTION_DETAIL
         Traceback (most recent call last):
             UnknownSliceLengthException: Cannot determine slice length without a defined end point. The reformatted slice was slice(0, None, 1).
diff --git a/nanshe/util/prof.py b/nanshe/util/prof.py
index edda616e..f6a3f0ca 100644
--- a/nanshe/util/prof.py
+++ b/nanshe/util/prof.py
@@ -34,7 +34,7 @@
 
 import psutil
 
-import wrappers
+from nanshe.util import wrappers
diff --git a/nanshe/util/wrappers.py b/nanshe/util/wrappers.py
index 641a69e6..6605a932 100644
--- a/nanshe/util/wrappers.py
+++ b/nanshe/util/wrappers.py
@@ -28,7 +28,6 @@
 import collections
 import inspect
-import itertools
 import functools
 import types
 
@@ -476,8 +475,8 @@ def repack_call_args(a_callable, *args, **kwargs):
         a_callable, *args, **kwargs
     )
 
-    new_args = tuple(callargs.values()[:len(args)]) + new_args
-    new_kwargs.update(dict(callargs.items()[len(args):]))
+    new_args = tuple(list(callargs.values())[:len(args)]) + new_args
+    new_kwargs.update(dict(list(callargs.items())[len(args):]))
 
     return(new_args, new_kwargs)
@@ -527,9 +526,12 @@ def with_setup_state_wrapper(a_callable, setup=setup, teardown=teardown):
                              teardown globals.
""" - stage_dict = {"setup": setup, "teardown": teardown} + stage_dict = collections.OrderedDict([ + ("setup", setup), + ("teardown", teardown) + ]) stage_orderer = [(lambda a, b: (a, b)), (lambda a, b: (b, a))] - stage_itr = itertools.izip(stage_dict.items(), stage_orderer) + stage_itr = zip(reversed(stage_dict.items()), stage_orderer) for (each_stage_name, each_new_stage), each_stage_orderer in stage_itr: each_old_stage = getattr(a_callable, each_stage_name, None) diff --git a/nanshe/util/xglob.py b/nanshe/util/xglob.py index 8a9625fe..b8a08dc2 100644 --- a/nanshe/util/xglob.py +++ b/nanshe/util/xglob.py @@ -23,7 +23,7 @@ import glob -import prof +from nanshe.util import prof diff --git a/nanshe/util/xnumpy.py b/nanshe/util/xnumpy.py index 2f515afc..44248ea4 100644 --- a/nanshe/util/xnumpy.py +++ b/nanshe/util/xnumpy.py @@ -44,11 +44,11 @@ import vigra -import iters +from nanshe.util import iters # Need in order to have logging information no matter what. -import prof +from nanshe.util import prof # Get the logger @@ -329,7 +329,7 @@ def add_singleton_axis_pos(a_array, axis=0): axis %= (a_array.ndim + 1) # Constructing the current ordering of axis and the singleton dime - new_array_shape = range(1, a_array.ndim + 1) + new_array_shape = list(iters.irange(1, a_array.ndim + 1)) new_array_shape.insert(axis, 0) new_array_shape = tuple(new_array_shape) @@ -517,7 +517,7 @@ def squish(new_array, axis=None, keepdims=False): # Convert the axes into a standard format that we can work with. axes = axis if axes is None: - axes = range(new_array.ndim) + axes = list(iters.irange(new_array.ndim)) else: # If axes is some kind of iterable, convert it to a list. # If not assume, it is a single value. @@ -527,7 +527,7 @@ def squish(new_array, axis=None, keepdims=False): axes = [axes] # Correct axes to be within the range [0, new_array.ndim). - for i in xrange(len(axes)): + for i in iters.irange(len(axes)): axes[i] %= new_array.ndim axes = tuple(axes) @@ -764,7 +764,7 @@ def unsquish(new_array, shape, axis=None): # Convert the axes into a standard format that we can work with. axes = axis if axes is None: - axes = range(0, len(shape)) + axes = list(iters.irange(0, len(shape))) else: # If axes is some kind of iterable, convert it to a list. # If not assume, it is a single value. @@ -774,7 +774,7 @@ def unsquish(new_array, shape, axis=None): axes = [axes] # Correct axes to be within the range [0, len(shape)). - for i in xrange(len(axes)): + for i in iters.irange(len(axes)): axes[i] %= len(shape) axes = tuple(axes) @@ -803,16 +803,16 @@ def unsquish(new_array, shape, axis=None): iters.xrange_with_skip(len(shape), to_skip=axes), axes ) # Get the current axis order (i.e. in order) - current_axis_order_iter = xrange(len(shape)) + current_axis_order_iter = iters.irange(len(shape)) # Find how the old order relates to the new one axis_order_map = dict( - itertools.izip(old_axis_order_iter, current_axis_order_iter) + iters.izip(old_axis_order_iter, current_axis_order_iter) ) # Export how the new order will be changed # (as the old axis order will be how to transform the axes). - axis_order = tuple(axis_order_map.itervalues()) + axis_order = tuple(axis_order_map.values()) # Put all axes not part of the group in front and # stuff the rest at the back. 
@@ -1154,7 +1154,7 @@ def roll(new_array, shift, out=None, to_mask=False):
 
         out.mask = numpy.ma.getmaskarray(out)
 
-    for i in xrange(len(shift)):
+    for i in iters.irange(len(shift)):
         if (shift[i] != 0):
             out[:] = numpy.roll(out, shift[i], i)
@@ -2356,7 +2356,7 @@ def enumerate_masks_max(new_masks, axis=0):
         dtype=numpy.uint64
     )
 
-    for i in xrange(new_masks.shape[axis]):
+    for i in iters.irange(new_masks.shape[axis]):
         i = new_enumerated_masks_max.dtype.type(i)
         one = new_enumerated_masks_max.dtype.type(1)
         numpy.maximum(
@@ -2469,11 +2469,11 @@ def cartesian_product(arrays):
                [0, 0, 3]])
     """
 
-    for i in xrange(len(arrays)):
+    for i in iters.irange(len(arrays)):
         assert (arrays[
                 i].ndim == 1), "Must provide only 1D arrays to this function or a single 2D array."
 
-    array_shapes = tuple(len(arrays[i]) for i in xrange(len(arrays)))
+    array_shapes = tuple(len(arrays[i]) for i in iters.irange(len(arrays)))
 
     result_shape = [0, 0]
     result_shape[0] = numpy.product(array_shapes)
@@ -2481,11 +2481,11 @@ def cartesian_product(arrays):
     result_shape = tuple(result_shape)
 
     result_dtype = numpy.find_common_type(
-        [arrays[i].dtype for i in xrange(result_shape[1])], []
+        [arrays[i].dtype for i in iters.irange(result_shape[1])], []
     )
 
     result = numpy.empty(result_shape, dtype=result_dtype)
-    for i in xrange(result.shape[1]):
+    for i in iters.irange(result.shape[1]):
         repeated_array_i = expand_view(
             arrays[i],
             reps_before=array_shapes[:i],
@@ -2581,13 +2581,13 @@ def truncate_masked_frames(shifted_frames):
 
     # Find the shape
     shifted_frames_mask_shape = tuple(shifted_frames_mask.sum(
-        axis=_i).max() for _i in xrange(shifted_frames_mask.ndim)
+        axis=_i).max() for _i in iters.irange(shifted_frames_mask.ndim)
     )
     shifted_frames_mask_shape = (len(shifted_frames),) + shifted_frames_mask_shape
 
     # Assert that this is an acceptable mask
     #shifted_frames_mask_upper_offset = tuple(
-    #    (shifted_frames_mask.sum(axis=_i) != 0).argmax() for _i in reversed(xrange(shifted_frames_mask.ndim))
+    #    (shifted_frames_mask.sum(axis=_i) != 0).argmax() for _i in reversed(iters.irange(shifted_frames_mask.ndim))
     #)
     #shifted_frames_mask_lower_offset = tuple(
     #    numpy.array(shifted_frames_mask.shape) - \
@@ -2597,7 +2597,7 @@ def truncate_masked_frames(shifted_frames):
     #
     #shifted_frames_mask_reconstructed = numpy.pad(
     #    numpy.ones(shifted_frames_mask_shape[1:], dtype=bool),
-    #    [(_d, _e) for _d, _e in itertools.izip(shifted_frames_mask_upper_offset, shifted_frames_mask_lower_offset)],
+    #    [(_d, _e) for _d, _e in iters.izip(shifted_frames_mask_upper_offset, shifted_frames_mask_lower_offset)],
     #    "constant"
     #)
     #assert(
@@ -3133,7 +3133,7 @@ def blocks_split(space_shape, block_shape, block_halo=None):
     haloed_ranges_per_dim = []
     trimmed_halos_per_dim = []
 
-    for each_dim in xrange(len(space_shape)):
+    for each_dim in iters.irange(len(space_shape)):
         # Construct each block using the block size given. Allow to spill over.
         if block_shape[each_dim] == -1:
             block_shape[each_dim] = space_shape[each_dim]
@@ -3161,13 +3161,13 @@ def blocks_split(space_shape, block_shape, block_halo=None):
 
         # Convert all ranges to slices for easier use.
         a_range = iters.reformat_slices([
-            slice(*a_range[i]) for i in xrange(len(a_range))
+            slice(*a_range[i]) for i in iters.irange(len(a_range))
         ])
         a_range_haloed = iters.reformat_slices([
-            slice(*a_range_haloed[i]) for i in xrange(len(a_range_haloed))
+            slice(*a_range_haloed[i]) for i in iters.irange(len(a_range_haloed))
         ])
         a_trimmed_halo = iters.reformat_slices([
-            slice(*a_trimmed_halo[i]) for i in xrange(len(a_trimmed_halo))
+            slice(*a_trimmed_halo[i]) for i in iters.irange(len(a_trimmed_halo))
         ])
 
         # Collect all blocks
@@ -3568,11 +3568,11 @@ def unique_mapping(mapping, out=None):
         out[:] = mapping
 
     injective_into = list()
-    for i in xrange(mapping.ndim):
+    for i in iters.irange(mapping.ndim):
         injective_into_i = (add_singleton_op(numpy.sum, mapping, i) == 1)
         injective_into.append(injective_into_i)
 
-    for i in xrange(mapping.ndim):
+    for i in iters.irange(mapping.ndim):
         out *= injective_into[i]
 
     return(out)
@@ -4124,14 +4124,14 @@ def matrix_reduced_op(a, b, op):
 
     assert (a.ndim == b.ndim)
 
-    for i in xrange(1, a.ndim):
+    for i in iters.irange(1, a.ndim):
         assert (a.shape[i] == b.shape[i])
 
     out = numpy.empty(
         (len(a), len(b)), dtype=numpy.promote_types(a.dtype, b.dtype)
     )
 
-    for i, j in itertools.product(xrange(out.shape[0]), xrange(out.shape[1])):
+    for i, j in itertools.product(iters.irange(out.shape[0]), iters.irange(out.shape[1])):
         out[i, j] = op(a[i], b[j])
 
     return(out)
@@ -4204,7 +4204,7 @@ def masks_intersection(a, b):
 
     out = numpy.empty((len(a), len(b)), dtype=numpy.uint64)
 
-    for i, j in itertools.product(xrange(out.shape[0]), xrange(out.shape[1])):
+    for i, j in itertools.product(iters.irange(out.shape[0]), iters.irange(out.shape[1])):
         out[i, j] = (a[i] & b[j]).sum()
 
     return(out)
@@ -4274,7 +4274,7 @@ def masks_union(a, b):
 
     out = numpy.empty((len(a), len(b)), dtype=numpy.uint64)
 
-    for i, j in itertools.product(xrange(out.shape[0]), xrange(out.shape[1])):
+    for i, j in itertools.product(iters.irange(out.shape[0]), iters.irange(out.shape[1])):
         out[i, j] = (a[i] | b[j]).sum()
 
     return(out)
diff --git a/nanshe/viewer.py b/nanshe/viewer.py
index 93557559..6f0e1b6c 100644
--- a/nanshe/viewer.py
+++ b/nanshe/viewer.py
@@ -74,7 +74,6 @@
 from nanshe.util import iters, xnumpy
 
 
-
 class HDF5DatasetNotFoundException(Exception):
     """
         An exception raised when a dataset is not found in an HDF5 file.
@@ -411,7 +410,7 @@ def __init__(self, file_handle, dataset_path, axis_order, dataset_dtype, slicing
         # To construct the list requires a second pass either way.
         self.slicing = list()
         actual_slicing_dict = dict()
-        for i, (each_slice, each_axis) in enumerate(itertools.izip(slicing, self.axis_order)):
+        for i, (each_slice, each_axis) in enumerate(iters.izip(slicing, self.axis_order)):
             self.slicing.append(each_slice)
             if each_axis != -1:
                 actual_slicing_dict[each_axis] = each_slice
@@ -741,7 +740,7 @@ def request(self, slicing):
 
         fuse_slicing = None
         non_fuse_slicing = []
-        for i, (each_slicing, each_len) in enumerate(itertools.izip(slicing, self.data_shape)):
+        for i, (each_slicing, each_len) in enumerate(iters.izip(slicing, self.data_shape)):
             each_slicing_formatted = None
             if i == self.fuse_axis:
                 each_len = len(self.data_sources)
@@ -1161,7 +1160,7 @@ def __init__(self, constant_source, axis=-1):
         self._constant_source_cached = self.constant_source.request(
             slicing
         ).wait()
-        for i in xrange(self._constant_source_cached.shape[self.axis]):
+        for i in iters.irange(self._constant_source_cached.shape[self.axis]):
             _constant_source_cached_i = xnumpy.index_axis_at_pos(
                 self._constant_source_cached, self.axis, i
             )
@@ -2010,7 +2009,7 @@ def main(*argv):
 
     # Open all of the files and store their handles
     parsed_args.file_handles = []
-    for i in xrange(len(parsed_args.input_files)):
+    for i in iters.irange(len(parsed_args.input_files)):
         parsed_args.input_files[i] = parsed_args.input_files[i].rstrip("/")
         parsed_args.input_files[i] = os.path.abspath(
             parsed_args.input_files[i]
@@ -2024,7 +2023,7 @@ def main(*argv):
     # whether they were or not before. The key will be the operation to
     # perform and the values will be what to perform the operation on.
     # If the key is a null string, no operation is performed.
-    for i in xrange(len(parsed_args.parameters)):
+    for i in iters.irange(len(parsed_args.parameters)):
         for (each_layer_name, each_layer_source_location_dict) in parsed_args.parameters[i].items():
             each_layer_source_operation_names = []
             each_layer_source_location_list = []
@@ -2033,9 +2032,9 @@ def main(*argv):
             while isinstance(each_layer_source_location_dict_inner, dict):
                 assert (len(each_layer_source_location_dict_inner) == 1)
                 each_layer_source_operation_names.extend(
-                    each_layer_source_location_dict_inner.keys()
+                    list(each_layer_source_location_dict_inner.keys())
                 )
-                each_layer_source_location_dict_inner = each_layer_source_location_dict_inner.values()[0]
+                each_layer_source_location_dict_inner = list(each_layer_source_location_dict_inner.values())[0]
 
                 if isinstance(each_layer_source_location_dict_inner, str):
                     each_layer_source_location_dict_inner = [
@@ -2076,7 +2075,7 @@ def main(*argv):
                 new_matches = hdf5.search.get_matching_grouped_paths(
                     each_file, each_layer_source_location
                 )
-                new_matches_ldict = itertools.izip(
+                new_matches_ldict = iters.izip(
                     new_matches,
                     itertools.repeat(None)
                 )
                 parsed_args.parameters_expanded[-1][each_layer_name][-1].update(new_matches_ldict)
@@ -2196,7 +2195,7 @@ def main(*argv):
 
     # Close and clean up files
     parsed_args.file_handles = []
-    for i in xrange(len(parsed_args.file_handles)):
+    for i in iters.irange(len(parsed_args.file_handles)):
         parsed_args.file_handles[i].close()
         parsed_args.file_handles[i] = None
diff --git a/setup.py b/setup.py
index 20b40350..fc371c35 100644
--- a/setup.py
+++ b/setup.py
@@ -58,10 +58,7 @@ def run_tests(self):
         "mahotas",
         "vigra",
         "spams",
-        "rank_filter",
-        "functools32",
-        "pyqt",
-        "volumina"
+        "rank_filter"
     ]
 
     install_requires = [
@@ -81,11 +78,25 @@ def run_tests(self):
         "mahotas",
         "vigra",
         "spams",
-        "rank_filter",
-        "functools32",
-        "pyqt",
-        "volumina"
+        "rank_filter"
     ]
+
+    if sys.version_info < (3, 2):
+        build_requires += [
+            "functools32"
+        ]
+        install_requires += [
+            "functools32"
+        ]
+    if sys.version_info < (3,):
+        build_requires += [
+            "pyqt",
+            "volumina"
+        ]
+        install_requires += [
+            "pyqt",
+            "volumina"
+        ]
 
 elif sys.argv[1] == "build_sphinx":
     import sphinx.apidoc
@@ -157,7 +168,7 @@ def run_tests(self):
         py_modules=["versioneer"],
         packages=find_packages(exclude=["tests*"]),
         distclass=Distribution,
-        cmdclass=dict(sum([_.items() for _ in [
+        cmdclass=dict(sum([list(_.items()) for _ in [
            versioneer.get_cmdclass(),
            {"test": NoseTestCommand}
        ]], [])),
diff --git a/tests/__init__.py b/tests/__init__.py
index a9ebe4b3..8965215b 100644
--- a/tests/__init__.py
+++ b/tests/__init__.py
@@ -4,4 +4,4 @@
 __all__ = ["test_nanshe"]
 
 
-import test_nanshe
+from tests import test_nanshe
diff --git a/tests/test_nanshe/__init__.py b/tests/test_nanshe/__init__.py
index b971b129..0c4032bf 100644
--- a/tests/test_nanshe/__init__.py
+++ b/tests/test_nanshe/__init__.py
@@ -7,11 +7,11 @@
 ]
 
 
-import test_box
-import test_converter
-import test_io
-import test_imp
-import test_learner
-import test_registerer
-import test_util
-# import test_viewer
+from tests.test_nanshe import test_box
+from tests.test_nanshe import test_converter
+from tests.test_nanshe import test_io
+from tests.test_nanshe import test_imp
+from tests.test_nanshe import test_learner
+from tests.test_nanshe import test_registerer
+from tests.test_nanshe import test_util
+# from tests.test_nanshe import test_viewer
diff --git a/tests/test_nanshe/test_box/__init__.py b/tests/test_nanshe/test_box/__init__.py
index 0709f613..7f2549bc 100644
--- a/tests/test_nanshe/test_box/__init__.py
+++ b/tests/test_nanshe/test_box/__init__.py
@@ -4,4 +4,4 @@
 __all__ = ["test_spams_sandbox"]
 
 
-import test_spams_sandbox
+from tests.test_nanshe.test_box import test_spams_sandbox
diff --git a/tests/test_nanshe/test_box/test_spams_sandbox.py b/tests/test_nanshe/test_box/test_spams_sandbox.py
index 4d038890..153b39f8 100644
--- a/tests/test_nanshe/test_box/test_spams_sandbox.py
+++ b/tests/test_nanshe/test_box/test_spams_sandbox.py
@@ -18,6 +18,12 @@
 import nanshe.syn.data
 
 
+try:
+    xrange
+except NameError:
+    xrange = range
+
+
 class TestSpamsSandbox(object):
     def setup(self):
         self.p = numpy.array([[27, 51],
diff --git a/tests/test_nanshe/test_converter.py b/tests/test_nanshe/test_converter.py
index 947daa90..d361b5b0 100644
--- a/tests/test_nanshe/test_converter.py
+++ b/tests/test_nanshe/test_converter.py
@@ -3,7 +3,6 @@
 
 import collections
 
-import itertools
 import json
 import os
 import os.path
@@ -33,9 +32,13 @@ def setup(self):
         self.temp_dir = tempfile.mkdtemp()
 
         for i, i_str, (a_b, a_e) in nanshe.util.iters.filled_stringify_enumerate(
-                itertools.izip(
+                nanshe.util.iters.izip(
                     *nanshe.util.iters.lagged_generators(
-                        xrange(0, self.data.shape[0] + 100 - 1, 100)
+                        nanshe.util.iters.irange(
+                            0,
+                            self.data.shape[0] + 100 - 1,
+                            100
+                        )
                     )
                 )
         ):
@@ -65,7 +68,7 @@ def test_main(self):
             json.dump(params, fid)
             fid.write("\n")
 
-        main_args = ["./converter.py"] + ["tiff"] + [config_filename] + self.filedata.keys() + [hdf5_filepath]
+        main_args = ["./converter.py"] + ["tiff"] + [config_filename] + list(self.filedata.keys()) + [hdf5_filepath]
 
         assert (nanshe.converter.main(*main_args) == 0)
diff --git a/tests/test_nanshe/test_imp/__init__.py b/tests/test_nanshe/test_imp/__init__.py
index a02ba063..a7b254fc 100644
--- a/tests/test_nanshe/test_imp/__init__.py
+++ b/tests/test_nanshe/test_imp/__init__.py
@@ -6,6 +6,6 @@
 ]
 
 
-import test_extended_region_props
-import test_registration
-import test_segment
+from tests.test_nanshe.test_imp import test_extended_region_props
+from tests.test_nanshe.test_imp import test_registration
+from tests.test_nanshe.test_imp import test_segment
diff --git a/tests/test_nanshe/test_imp/test_extended_region_props.py b/tests/test_nanshe/test_imp/test_extended_region_props.py
index 147f59b6..8d99644f 100644
--- a/tests/test_nanshe/test_imp/test_extended_region_props.py
+++ b/tests/test_nanshe/test_imp/test_extended_region_props.py
@@ -13,6 +13,7 @@
 
 import numpy
 
+import nanshe.util.iters
 import nanshe.util.xnumpy
 
 import nanshe.imp.segment
@@ -57,9 +58,9 @@ def test_ExtendedRegionProps_1(self):
                 nanshe.util.xnumpy.enumerate_masks(m).max(axis=0),
                 properties=["label", "centroid"])
         ).all()
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_index_array(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_index_array(), tuple(p.T))])
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
 
         assert (e.get_local_max_label_image()[e.get_local_max_label_image().nonzero()] == numpy.arange(1, len(m) + 1)).all()
@@ -104,9 +105,9 @@ def test_ExtendedRegionProps_2(self):
                 nanshe.util.xnumpy.enumerate_masks(m).max(axis=0),
                 properties=["label", "centroid"])
         ).all()
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_index_array(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_index_array(), tuple(p.T))])
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
 
         assert (e.get_local_max_label_image()[e.get_local_max_label_image().nonzero()] == numpy.array([1, 1, 2])).all()
@@ -152,9 +153,9 @@ def test_ExtendedRegionProps_3(self):
                 nanshe.util.xnumpy.enumerate_masks(m).max(axis=0),
                 properties=["label", "centroid"])
         ).all()
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_index_array(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_index_array(), tuple(p.T))])
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
 
         assert (e.get_local_max_label_image()[e.get_local_max_label_image().nonzero()] == numpy.arange(1, len(m) + 1)).all()
@@ -207,9 +208,9 @@ def test_ExtendedRegionProps_4(self):
                 nanshe.util.xnumpy.enumerate_masks(m).max(axis=0),
                 properties=["label", "centroid"])
         ).all()
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_index_array(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_index_array(), tuple(p.T))])
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
 
         assert (e.get_local_max_label_image()[e.get_local_max_label_image().nonzero()] == numpy.arange(1, len(m) + 1)).all()
@@ -266,9 +267,9 @@ def test_ExtendedRegionProps_5(self):
                 nanshe.util.xnumpy.enumerate_masks(m).max(axis=0),
                 properties=["label", "centroid"])
         ).all()
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_index_array(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_index_array(), tuple(p.T))])
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
 
         assert (e.get_local_max_label_image()[e.get_local_max_label_image().nonzero()] == numpy.arange(1, len(m) + 1)).all()
@@ -310,9 +311,9 @@ def test_ExtendedRegionProps_6(self):
             )
         ).all()
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_index_array(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_index_array(), tuple(p.T))])
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
 
         assert (e.get_local_max_label_image()[e.get_local_max_label_image().nonzero()] == numpy.arange(1, len(m) + 1)).all()
@@ -359,9 +360,9 @@ def test_ExtendedRegionProps_7(self):
             )
         ).all()
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_index_array(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_index_array(), tuple(p.T))])
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
 
         assert (e.get_local_max_label_image()[e.get_local_max_label_image().nonzero()] == numpy.array([1, 1, 2])).all()
@@ -409,9 +410,9 @@ def test_ExtendedRegionProps_8(self):
             )
         ).all()
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_index_array(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_index_array(), tuple(p.T))])
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
 
         assert (e.get_local_max_label_image()[e.get_local_max_label_image().nonzero()] == numpy.arange(1, len(m) + 1)).all()
@@ -466,9 +467,9 @@ def test_ExtendedRegionProps_9(self):
             )
         ).all()
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_index_array(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_index_array(), tuple(p.T))])
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
 
         assert (e.get_local_max_label_image()[e.get_local_max_label_image().nonzero()] == numpy.arange(1, len(m) + 1)).all()
@@ -527,8 +528,8 @@ def test_ExtendedRegionProps_10(self):
             )
         ).all()
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_index_array(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_index_array(), tuple(p.T))])
 
-        assert all([(_1 == _2).all() for _1, _2 in itertools.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
+        assert all([(_1 == _2).all() for _1, _2 in nanshe.util.iters.izip(e.get_local_max_label_image().nonzero(), tuple(p.T))])
 
         assert (e.get_local_max_label_image()[e.get_local_max_label_image().nonzero()] == numpy.arange(1, len(m) + 1)).all()
diff --git a/tests/test_nanshe/test_imp/test_registration.py b/tests/test_nanshe/test_imp/test_registration.py
index c9b7bab9..05f68d77 100644
--- a/tests/test_nanshe/test_imp/test_registration.py
+++ b/tests/test_nanshe/test_imp/test_registration.py
@@ -17,6 +17,12 @@
 import nanshe.imp.registration
 
 
+try:
+    basestring
+except NameError:
+    basestring = str
+
+
 class TestRegisterMeanOffsets(object):
     def test0a(self):
         a = numpy.zeros((20,10,11), dtype=int)
diff --git a/tests/test_nanshe/test_imp/test_segment.py b/tests/test_nanshe/test_imp/test_segment.py
index de400ac9..d04f2043 100644
--- a/tests/test_nanshe/test_imp/test_segment.py
+++ b/tests/test_nanshe/test_imp/test_segment.py
@@ -17,6 +17,7 @@
 
 import scipy.stats
 
+import nanshe.util.iters
 import nanshe.util.xnumpy
 
 import nanshe.imp.segment
@@ -854,7 +855,7 @@ def test_generate_dictionary_00(self):
         unmatched_g = range(len(g))
         matched = dict()
 
-        for i in xrange(len(d)):
+        for i in nanshe.util.iters.irange(len(d)):
             new_unmatched_g = []
             for j in unmatched_g:
                 if not (d[i] == g[j]).all():
@@ -907,7 +908,7 @@ def test_generate_dictionary_01(self):
         unmatched_g = range(len(g))
         matched = dict()
 
-        for i in xrange(len(d)):
+        for i in nanshe.util.iters.irange(len(d)):
             new_unmatched_g = []
             for j in unmatched_g:
                 if not (d[i] == g[j]).all():
@@ -961,7 +962,7 @@ def test_generate_dictionary_02(self):
         unmatched_g = range(len(g))
         matched = dict()
 
-        for i in xrange(len(d)):
+        for i in nanshe.util.iters.irange(len(d)):
             new_unmatched_g = []
             for j in unmatched_g:
                 if not (d[i] == g[j]).all():
@@ -1015,7 +1016,7 @@ def test_generate_dictionary_03(self):
         unmatched_g = range(len(g))
         matched = dict()
 
-        for i in xrange(len(d)):
+        for i in nanshe.util.iters.irange(len(d)):
             new_unmatched_g = []
             for j in unmatched_g:
                 if not (d[i] == g[j]).all():
@@ -1069,7 +1070,7 @@ def test_generate_dictionary_04(self):
         unmatched_g = range(len(g))
         matched = dict()
 
-        for i in xrange(len(d)):
+        for i in nanshe.util.iters.irange(len(d)):
             new_unmatched_g = []
             for j in unmatched_g:
                 if not (d[i] == g[j]).all():
@@ -1125,7 +1126,7 @@ def test_generate_dictionary_05(self):
         unmatched_g = range(len(g))
         matched = dict()
 
-        for i in xrange(len(d)):
+        for i in nanshe.util.iters.irange(len(d)):
             new_unmatched_g = []
             for j in unmatched_g:
                 if not (d[i] == g[j]).all():
@@ -1182,7 +1183,7 @@ def test_generate_dictionary_06(self):
         unmatched_g = range(len(g))
         matched = dict()
 
-        for i in xrange(len(d)):
+        for i in nanshe.util.iters.irange(len(d)):
             new_unmatched_g = []
             for j in unmatched_g:
                 if not (d[i] == g[j]).all():
@@ -1239,7 +1240,7 @@ def test_generate_dictionary_07(self):
         unmatched_g = range(len(g))
         matched = dict()
 
-        for i in xrange(len(d)):
+        for i in nanshe.util.iters.irange(len(d)):
             new_unmatched_g = []
             for j in unmatched_g:
                 if not (d[i] == g[j]).all():
@@ -1286,7 +1287,7 @@ def test_generate_dictionary_08(self):
         unmatched_g = range(len(g))
         matched = dict()
 
-        for i in xrange(len(d)):
+        for i in nanshe.util.iters.irange(len(d)):
             new_unmatched_g = []
             for j in unmatched_g:
                 if not (d[i] == g[j]).all():
@@ -1332,7 +1333,7 @@ def test_generate_dictionary_09(self):
         unmatched_g = range(len(g))
         matched = dict()
 
-        for i in xrange(len(d)):
+        for i in nanshe.util.iters.irange(len(d)):
             new_unmatched_g = []
             for j in unmatched_g:
                 if not (d[i] == g[j]).all():
@@ -1378,7 +1379,7 @@ def test_generate_dictionary_10(self):
         unmatched_g = range(len(g))
         matched = dict()
 
-        for i in xrange(len(d)):
+        for i in nanshe.util.iters.irange(len(d)):
             new_unmatched_g = []
             for j in unmatched_g:
                 if not (d[i] == g[j]).all():
@@ -1427,7 +1428,7 @@ def test_generate_dictionary_11(self):
         unmatched_g = range(len(g))
         matched = dict()
 
-        for i in xrange(len(d)):
+        for i in nanshe.util.iters.irange(len(d)):
             new_unmatched_g = []
             for j in unmatched_g:
                 if not (d[i] == g[j]).all():
@@ -1725,7 +1726,7 @@ def test_remove_low_intensity_local_maxima_2(self):
         e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
 
         percentage_pixels_below_max = numpy.zeros((len(masks),), float)
-        for i in xrange(len(masks)):
+        for i in nanshe.util.iters.irange(len(masks)):
             pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
 
             pixels = masks[i].sum()
@@ -1757,7 +1758,7 @@ def test_remove_low_intensity_local_maxima_3(self):
         e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
 
         percentage_pixels_below_max = numpy.zeros((len(masks),), float)
-        for i in xrange(len(masks)):
+        for i in nanshe.util.iters.irange(len(masks)):
             pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
 
             pixels = masks[i].sum()
@@ -1789,7 +1790,7 @@ def test_remove_low_intensity_local_maxima_4(self):
         e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
 
         percentage_pixels_below_max = numpy.zeros((len(masks),), float)
-        for i in xrange(len(masks)):
+        for i in nanshe.util.iters.irange(len(masks)):
             pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
 
             pixels = masks[i].sum()
@@ -1847,7 +1848,7 @@ def test_remove_low_intensity_local_maxima_6(self):
         e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
 
         percentage_pixels_below_max = numpy.zeros((len(masks),), float)
-        for i in xrange(len(masks)):
+        for i in nanshe.util.iters.irange(len(masks)):
             pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
 
             pixels = masks[i].sum()
@@ -1880,7 +1881,7 @@ def test_remove_low_intensity_local_maxima_7(self):
         e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
 
         percentage_pixels_below_max = numpy.zeros((len(masks),), float)
-        for i in xrange(len(masks)):
+        for i in nanshe.util.iters.irange(len(masks)):
             pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
 
             pixels = masks[i].sum()
@@ -1913,7 +1914,7 @@ def test_remove_low_intensity_local_maxima_8(self):
         e = nanshe.imp.segment.ExtendedRegionProps(images.max(axis=0), labels)
 
         percentage_pixels_below_max = numpy.zeros((len(masks),), float)
-        for i in xrange(len(masks)):
+        for i in nanshe.util.iters.irange(len(masks)):
             pixels_below_max = (images.max(axis=0)[masks[i].nonzero()] < images.max(axis=0)[masks[i]].max()).sum()
 
             pixels = masks[i].sum()
@@ -2400,7 +2401,7 @@ def test_extract_neurons_1(self):
         circle_mask_mean = numpy.zeros((len(circle_masks), image.ndim,))
         circle_mask_cov = numpy.zeros((len(circle_masks), image.ndim, image.ndim,))
 
-        for circle_mask_i in xrange(len(circle_masks)):
+        for circle_mask_i in nanshe.util.iters.irange(len(circle_masks)):
             each_circle_mask_points = numpy.array(circle_masks[circle_mask_i].nonzero(), dtype=float)
 
             circle_mask_mean[circle_mask_i] = each_circle_mask_points.mean(axis=1)
@@ -2444,7 +2445,7 @@ def test_extract_neurons_2(self):
         circle_mask_mean = numpy.zeros((len(circle_masks), image.ndim,))
         circle_mask_cov = numpy.zeros((len(circle_masks), image.ndim, image.ndim,))
 
-        for circle_mask_i in xrange(len(circle_masks)):
+        for circle_mask_i in nanshe.util.iters.irange(len(circle_masks)):
             each_circle_mask_points = numpy.array(circle_masks[circle_mask_i].nonzero(), dtype=float)
 
             circle_mask_mean[circle_mask_i] = each_circle_mask_points.mean(axis=1)
@@ -2488,7 +2489,7 @@ def test_fuse_neurons_1(self):
         circle_mask_mean = numpy.zeros((len(circle_masks), image.ndim,))
         circle_mask_cov = numpy.zeros((len(circle_masks), image.ndim, image.ndim,))
 
-        for circle_mask_i in xrange(len(circle_masks)):
+        for circle_mask_i in nanshe.util.iters.irange(len(circle_masks)):
             each_circle_mask_points = numpy.array(circle_masks[circle_mask_i].nonzero(), dtype=float)
 
             circle_mask_mean[circle_mask_i] = each_circle_mask_points.mean(axis=1)
@@ -2532,7 +2533,7 @@ def test_fuse_neurons_2(self):
         circle_mask_mean = numpy.zeros((len(circle_masks), image.ndim,))
         circle_mask_cov = numpy.zeros((len(circle_masks), image.ndim, image.ndim,))
 
-        for circle_mask_i in xrange(len(circle_masks)):
+        for circle_mask_i in nanshe.util.iters.irange(len(circle_masks)):
             each_circle_mask_points = numpy.array(circle_masks[circle_mask_i].nonzero(), dtype=float)
 
             circle_mask_mean[circle_mask_i] = each_circle_mask_points.mean(axis=1)
@@ -2747,7 +2748,7 @@ def test_postprocess_data_1(self):
 
         matched = dict()
         unmatched_points = numpy.arange(len(points))
-        for i in xrange(len(neuron_points)):
+        for i in nanshe.util.iters.irange(len(neuron_points)):
             new_unmatched_points = []
             for j in unmatched_points:
                 if not (neuron_points[i] == points[j]).all():
@@ -2835,7 +2836,7 @@ def test_postprocess_data_2(self):
 
         matched = dict()
         unmatched_points = numpy.arange(len(points))
-        for i in xrange(len(neuron_points)):
+        for i in nanshe.util.iters.irange(len(neuron_points)):
             new_unmatched_points = []
             for j in unmatched_points:
                 if not (neuron_points[i] == points[j]).all():
@@ -2926,7 +2927,7 @@ def test_postprocess_data_3(self):
 
         matched = dict()
         unmatched_points = numpy.arange(len(points))
-        for i in xrange(len(neuron_points)):
+        for i in nanshe.util.iters.irange(len(neuron_points)):
             new_unmatched_points = []
             for j in unmatched_points:
                 if not (neuron_points[i] == points[j]).all():
@@ -3012,7 +3013,7 @@ def test_postprocess_data_4(self):
 
         matched = dict()
         unmatched_points = numpy.arange(len(points))
-        for i in xrange(len(neuron_points)):
+        for i in nanshe.util.iters.irange(len(neuron_points)):
             new_unmatched_points = []
             for j in unmatched_points:
                 if not (neuron_points[i] == points[j]).all():
diff --git a/tests/test_nanshe/test_io/__init__.py b/tests/test_nanshe/test_io/__init__.py
index 864df678..246bcd17 100644
--- a/tests/test_nanshe/test_io/__init__.py
+++ b/tests/test_nanshe/test_io/__init__.py
@@ -3,6 +3,6 @@
 __all__ = ["test_hdf5", "test_xjson", "test_xtiff"]
 
 
-import test_hdf5
-import test_xjson
-import test_xtiff
+from tests.test_nanshe.test_io import test_hdf5
+from tests.test_nanshe.test_io import test_xjson
+from tests.test_nanshe.test_io import test_xtiff
diff --git a/tests/test_nanshe/test_io/test_hdf5/__init__.py b/tests/test_nanshe/test_io/test_hdf5/__init__.py
index 2c26e761..9ef47213 100644
--- a/tests/test_nanshe/test_io/test_hdf5/__init__.py
+++ b/tests/test_nanshe/test_io/test_hdf5/__init__.py
@@ -4,6 +4,6 @@
 __all__ = ["test_record", "test_search", "test_serializers"]
 
 
-import test_record
-import test_search
-import test_serializers
+from tests.test_nanshe.test_io.test_hdf5 import test_record
+from tests.test_nanshe.test_io.test_hdf5 import test_search
+from tests.test_nanshe.test_io.test_hdf5 import test_serializers
diff --git a/tests/test_nanshe/test_io/test_hdf5/test_search.py b/tests/test_nanshe/test_io/test_hdf5/test_search.py
index a9601c17..4c95256a 100644
--- a/tests/test_nanshe/test_io/test_hdf5/test_search.py
+++ b/tests/test_nanshe/test_io/test_hdf5/test_search.py
@@ -9,6 +9,8 @@
 
 import h5py
 
+import nanshe.util.iters
+
 import nanshe.io.hdf5.search
 
 
@@ -64,7 +66,7 @@ def test_get_matching_paths(self):
 
         assert (len(all_matched) == (len(TestHDF5Searchers.groups_0) * len(TestHDF5Searchers.groups_1) - 1))
 
-        for _1, _2 in itertools.izip(TestHDF5Searchers.get_matching_paths_generator(), all_matched):
+        for _1, _2 in nanshe.util.iters.izip(TestHDF5Searchers.get_matching_paths_generator(), all_matched):
             assert (_1 == _2)
 
     def test_get_matching_paths_groups(self):
@@ -76,10 +78,10 @@ def test_get_matching_paths_groups(self):
 
         assert (num_permutations == (len(TestHDF5Searchers.groups_0) * len(TestHDF5Searchers.groups_1)))
 
-        for _1, _2 in itertools.izip(TestHDF5Searchers.get_matching_paths_groups_generator(), all_matched):
+        for _1, _2 in nanshe.util.iters.izip(TestHDF5Searchers.get_matching_paths_groups_generator(), all_matched):
             assert (_1 == _2)
 
-        for _1, _2 in itertools.izip(TestHDF5Searchers.get_matching_grouped_paths_gen(), TestHDF5Searchers.match_path_groups_gen(all_matched)):
+        for _1, _2 in nanshe.util.iters.izip(TestHDF5Searchers.get_matching_grouped_paths_gen(), TestHDF5Searchers.match_path_groups_gen(all_matched)):
             assert (_1 == _2)
 
     def test_get_matching_grouped_paths(self):
@@ -87,7 +89,7 @@ def test_get_matching_grouped_paths(self):
 
         assert (len(all_matched) == (len(TestHDF5Searchers.groups_0) * len(TestHDF5Searchers.groups_1)))
 
-        for _1, _2 in itertools.izip(TestHDF5Searchers.get_matching_grouped_paths_gen(), all_matched):
+        for _1, _2 in nanshe.util.iters.izip(TestHDF5Searchers.get_matching_grouped_paths_gen(), all_matched):
             assert (_1 == _2)
 
     def test_get_matching_grouped_paths_found(self):
@@ -95,7 +97,7 @@ def test_get_matching_grouped_paths_found(self):
 
         assert (len(all_matched) == (len(TestHDF5Searchers.groups_0) * len(TestHDF5Searchers.groups_1)))
 
-        for _1, _2, _3 in itertools.izip(TestHDF5Searchers.get_matching_grouped_paths_gen(), all_matched.iterkeys(), all_matched.itervalues()):
+        for _1, _2, _3 in nanshe.util.iters.izip(TestHDF5Searchers.get_matching_grouped_paths_gen(), all_matched.keys(), all_matched.values()):
             assert (_1 == _2)
             assert ((_2 in self.temp_hdf5_file) == _3)
diff --git a/tests/test_nanshe/test_io/test_xjson.py b/tests/test_nanshe/test_io/test_xjson.py
index 0a95b2ba..7f5680ef 100644
--- a/tests/test_nanshe/test_io/test_xjson.py
+++ b/tests/test_nanshe/test_io/test_xjson.py
@@ -11,6 +11,17 @@
 import nanshe.io.xjson
 
 
+try:
+    unicode
+except NameError:
+    unicode = str
+
+try:
+    xrange
+except NameError:
+    xrange = range
+
+
 class TestXJson(object):
     def setup(self):
         self.temp_dir = tempfile.mkdtemp()
@@ -19,14 +30,18 @@ def test0a(self):
         dict_type = dict
 
-        params = dict_type()
-        params["b"] = range(3)
+        params = collections.OrderedDict()
+        params["b"] = list(xrange(3))
         params["c"] = "test"
         params["a"] = 5
-        params["d"] = dict_type(params)
-        params["h"] = [dict_type(params["d"])]
+        params["d"] = collections.OrderedDict(params)
+        params["h"] = [collections.OrderedDict(params["d"])]
         params["g"] = [[_k, _v] for _k, _v in params["d"].items()]
 
+        params["d"] = dict_type(params["d"])
+        params["h"][0] = dict_type(params["h"][0])
+        params = dict_type(params)
+
         config_filename = os.path.join(self.temp_dir, "config.json")
         with open(config_filename, "w") as config_file:
             json.dump(params, config_file)
 
@@ -45,18 +60,22 @@ def test0b(self):
         dict_type = dict
 
-        params = dict_type()
-        params["b"] = range(3)
+        params = collections.OrderedDict()
+        params["b"] = list(xrange(3))
         params["b"].append("__comment__ to drop")
         params["c"] = "test"
         params["a"] = 5
-        params["d"] = dict_type(params)
-        params["h"] = [dict_type(params["d"])]
+        params["d"] = collections.OrderedDict(params)
+        params["h"] = [collections.OrderedDict(params["d"])]
         params["g"] = [[_k, _v] for _k, _v in params["d"].items()]
         params["e"] = "__comment__ will be removed"
         params["__comment__ e"] = "also will be removed"
         params["f"] = u"will not be unicode"
 
+        params["d"] = dict_type(params["d"])
+        params["h"][0] = dict_type(params["h"][0])
+        params = dict_type(params)
+
         config_filename = os.path.join(self.temp_dir, "config.json")
         with open(config_filename, "w") as config_file:
             json.dump(params, config_file)
 
@@ -72,10 +91,12 @@ def test0b(self):
         params["b"] = params["b"][:-1]
         params["d"]["b"] = params["d"]["b"][:-1]
         params["h"][0]["b"] = params["h"][0]["b"][:-1]
-        params["g"][-1][1] = params["g"][-1][1][:-1]
+        params["g"][0][-1] = params["g"][0][-1][:-1]
         del params["e"]
         del params["__comment__ e"]
-        params["f"] = params["f"].encode("utf-8")
+
+        if str != unicode:
+            params["f"] = params["f"].encode("utf-8")
 
         assert params == params_out
 
@@ -83,14 +104,18 @@ def test1a(self):
         dict_type = collections.OrderedDict
 
-        params = dict_type()
-        params["b"] = range(3)
+        params = collections.OrderedDict()
+        params["b"] = list(xrange(3))
         params["c"] = "test"
         params["a"] = 5
-        params["d"] = dict_type(params)
-        params["h"] = [dict_type(params["d"])]
+        params["d"] = collections.OrderedDict(params)
+        params["h"] = [collections.OrderedDict(params["d"])]
         params["g"] = [[_k, _v] for _k, _v in params["d"].items()]
 
+        params["d"] = dict_type(params["d"])
+        params["h"][0] = dict_type(params["h"][0])
+        params = dict_type(params)
+
         config_filename = os.path.join(self.temp_dir, "config.json")
         with open(config_filename, "w") as config_file:
             json.dump(params, config_file)
 
@@ -109,18 +134,22 @@ def test1b(self):
         dict_type = collections.OrderedDict
 
-        params = dict_type()
-        params["b"] = range(3)
+        params = collections.OrderedDict()
+        params["b"] = list(xrange(3))
         params["b"].append("__comment__ to drop")
         params["c"] = "test"
         params["a"] = 5
-        params["d"] = dict_type(params)
-        params["h"] = [dict_type(params["d"])]
+        params["d"] = collections.OrderedDict(params)
+        params["h"] = [collections.OrderedDict(params["d"])]
         params["g"] = [[_k, _v] for _k, _v in params["d"].items()]
         params["e"] = "__comment__ will be removed"
         params["__comment__ e"] = "also will be removed"
         params["f"] = u"will not be unicode"
 
+        params["d"] = dict_type(params["d"])
+        params["h"][0] = dict_type(params["h"][0])
+        params = dict_type(params)
+
         config_filename = os.path.join(self.temp_dir, "config.json")
         with open(config_filename, "w") as config_file:
             json.dump(params, config_file)
 
@@ -139,7 +168,9 @@ def test1b(self):
         params["g"][0][1] = params["g"][0][1][:-1]
         del params["e"]
         del params["__comment__ e"]
-        params["f"] = params["f"].encode("utf-8")
+
+        if str != unicode:
+            params["f"] = params["f"].encode("utf-8")
 
         assert params == params_out
diff --git a/tests/test_nanshe/test_io/test_xtiff.py b/tests/test_nanshe/test_io/test_xtiff.py
index cd746e12..f8f7ab5b 100644
--- a/tests/test_nanshe/test_io/test_xtiff.py
+++ b/tests/test_nanshe/test_io/test_xtiff.py
@@ -3,7 +3,6 @@
 
 import collections
 
-import itertools
 import json
 import os
 import os.path
@@ -23,6 +22,12 @@
 import nanshe.converter
 
 
+try:
+    unicode
+except NameError:
+    unicode = str
+
+
 class TestXTiff(object):
     def setup(self):
         self.temp_dir = ""
@@ -33,11 +38,13 @@ def setup(self):
         self.data = numpy.random.random_integers(0, 255, (500, 1, 102, 101, 2)).astype(numpy.uint8)
 
-        self.offsets = list(xrange(0, self.data.shape[0] + 100 - 1, 100))
+        self.offsets = list(nanshe.util.iters.irange(
+            0, self.data.shape[0] + 100 - 1, 100
+        ))
 
         self.temp_dir = tempfile.mkdtemp()
 
         for i, i_str, (a_b, a_e) in nanshe.util.iters.filled_stringify_enumerate(
-                itertools.izip(
+                nanshe.util.iters.izip(
                     *nanshe.util.iters.lagged_generators(
                         self.offsets
                     )
                 )
@@ -117,7 +124,7 @@ def test_convert_tiffs(self):
 
         nanshe.io.xtiff.convert_tiffs(
-            self.filedata.keys(),
+            list(self.filedata.keys()),
             hdf5_filepath,
             pages_to_channel=self.pages_to_channel
         )
@@ -143,7 +150,7 @@ def test_convert_tiffs(self):
         self_data_h5 = nanshe.util.xnumpy.tagging_reorder_array(
             self.data, to_axis_order="cztyx"
         )[0, 0]
-        self_filenames = numpy.array(self.filedata.keys())
+        self_filenames = numpy.array(list(self.filedata.keys()))
 
         assert len(filenames) == len(self_filenames)
         assert (filenames == self_filenames).all()
diff --git a/tests/test_nanshe/test_learner.py b/tests/test_nanshe/test_learner.py
index 8f7c3262..1c28855b 100644
--- a/tests/test_nanshe/test_learner.py
+++ b/tests/test_nanshe/test_learner.py
@@ -15,6 +15,7 @@
 import h5py
 import numpy
 
+import nanshe.util.iters
 import nanshe.util.xnumpy
 import nanshe.util.wrappers
 import nanshe.io.hdf5.record
@@ -411,7 +412,7 @@ def setup_2d(a_callable):
     image_stack = numpy.zeros((bases_images.shape[0] * len(ramp),) + bases_images.shape[1:],
                               dtype=bases_images.dtype)
 
-    for i in xrange(len(bases_images)):
+    for i in nanshe.util.iters.irange(len(bases_images)):
         image_stack_slice = slice(i * len(ramp), (i+1) * len(ramp), 1)
 
         image_stack[image_stack_slice] = nanshe.util.xnumpy.all_permutations_operation(
@@ -801,7 +802,7 @@ def setup_3d(a_callable):
     image_stack3 = numpy.zeros(
         (bases_images3.shape[0] * len(ramp),) + bases_images3.shape[1:],
         dtype=bases_images3.dtype
    )
-    for i in xrange(len(bases_images3)):
+    for i in nanshe.util.iters.irange(len(bases_images3)):
         image_stack_slice3 = slice(i * len(ramp), (i+1) * len(ramp), 1)
 
         image_stack3[image_stack_slice3] = nanshe.util.xnumpy.all_permutations_operation(
@@ -884,7 +885,7 @@ def test_main_1():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -896,7 +897,7 @@ def test_main_1():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_main_1.points))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_main_1.points[j]).all():
@@ -929,7 +930,7 @@ def test_main_2():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -941,7 +942,7 @@ def test_main_2():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_main_2.points))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_main_2.points[j]).all():
@@ -987,7 +988,7 @@ def test_main_3():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -999,7 +1000,7 @@ def test_main_3():
 
     matched = dict()
    unmatched_points = numpy.arange(len(test_main_3.points))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_main_3.points[j]).all():
@@ -1033,7 +1034,7 @@ def test_main_4():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1045,7 +1046,7 @@ def test_main_4():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_main_4.points3))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_main_4.points3[j]).all():
@@ -1079,7 +1080,7 @@ def test_main_5():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1091,7 +1092,7 @@ def test_main_5():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_main_5.points3))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_main_5.points3[j]).all():
@@ -1137,7 +1138,7 @@ def test_main_6():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1149,7 +1150,7 @@ def test_main_6():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_main_6.points3))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_main_6.points3[j]).all():
@@ -1178,7 +1179,7 @@ def test_generate_neurons_io_handler_1():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1190,7 +1191,7 @@ def test_generate_neurons_io_handler_1():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_io_handler_1.points))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_io_handler_1.points[j]).all():
@@ -1219,7 +1220,7 @@ def test_generate_neurons_io_handler_2():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1231,7 +1232,7 @@ def test_generate_neurons_io_handler_2():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_io_handler_2.points))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_io_handler_2.points[j]).all():
@@ -1273,7 +1274,7 @@ def test_generate_neurons_io_handler_3():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1285,7 +1286,7 @@ def test_generate_neurons_io_handler_3():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_io_handler_3.points))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_io_handler_3.points[j]).all():
@@ -1315,7 +1316,7 @@ def test_generate_neurons_io_handler_4():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1327,7 +1328,7 @@ def test_generate_neurons_io_handler_4():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_io_handler_4.points3))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_io_handler_4.points3[j]).all():
@@ -1357,7 +1358,7 @@ def test_generate_neurons_io_handler_5():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1369,7 +1370,7 @@ def test_generate_neurons_io_handler_5():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_io_handler_5.points3))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_io_handler_5.points3[j]).all():
@@ -1411,7 +1412,7 @@ def test_generate_neurons_io_handler_6():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1423,7 +1424,7 @@ def test_generate_neurons_io_handler_6():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_io_handler_6.points3))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_io_handler_6.points3[j]).all():
@@ -1452,7 +1453,7 @@ def test_generate_neurons_a_block_1():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1464,7 +1465,7 @@ def test_generate_neurons_a_block_1():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_a_block_1.points))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_a_block_1.points[j]).all():
@@ -1494,7 +1495,7 @@ def test_generate_neurons_a_block_2():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1506,7 +1507,7 @@ def test_generate_neurons_a_block_2():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_a_block_2.points3))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_a_block_2.points3[j]).all():
@@ -1535,7 +1536,7 @@ def test_generate_neurons_blocks_1():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1547,7 +1548,7 @@ def test_generate_neurons_blocks_1():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_blocks_1.points))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_blocks_1.points[j]).all():
@@ -1589,7 +1590,7 @@ def test_generate_neurons_blocks_2():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1601,7 +1602,7 @@ def test_generate_neurons_blocks_2():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_blocks_2.points))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_blocks_2.points[j]).all():
@@ -1631,7 +1632,7 @@ def test_generate_neurons_blocks_3():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1643,7 +1644,7 @@ def test_generate_neurons_blocks_3():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_blocks_3.points3))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_blocks_3.points3[j]).all():
@@ -1685,7 +1686,7 @@ def test_generate_neurons_blocks_4():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1697,7 +1698,7 @@ def test_generate_neurons_blocks_4():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_blocks_4.points3))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_blocks_4.points3[j]).all():
@@ -1754,7 +1755,7 @@ def test_generate_neurons_1():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1766,7 +1767,7 @@ def test_generate_neurons_1():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_1.points))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_1.points[j]).all():
@@ -1824,7 +1825,7 @@ def test_generate_neurons_2():
     neuron_maxes = (neurons["image"] == nanshe.util.xnumpy.expand_view(neurons["max_F"], neurons["image"].shape[1:]))
 
     neuron_max_points = []
-    for i in xrange(len(neuron_maxes)):
+    for i in nanshe.util.iters.irange(len(neuron_maxes)):
         neuron_max_points.append(
             numpy.array(neuron_maxes[i].nonzero()).mean(axis=1).round().astype(int)
         )
@@ -1836,7 +1837,7 @@ def test_generate_neurons_2():
 
     matched = dict()
     unmatched_points = numpy.arange(len(test_generate_neurons_2.points3))
-    for i in xrange(len(neuron_max_points)):
+    for i in nanshe.util.iters.irange(len(neuron_max_points)):
         new_unmatched_points = []
         for j in unmatched_points:
             if not (neuron_max_points[i] == test_generate_neurons_2.points3[j]).all():
diff --git a/tests/test_nanshe/test_util/__init__.py b/tests/test_nanshe/test_util/__init__.py
index 4bde10ee..55d1edf5 100644
--- a/tests/test_nanshe/test_util/__init__.py
+++ b/tests/test_nanshe/test_util/__init__.py
@@ -6,7 +6,7 @@
 ]
 
 
-import test_prof
-import test_wrappers
-import test_xglob
-import testPathHelpers
+from tests.test_nanshe.test_util import test_prof
+from tests.test_nanshe.test_util import test_wrappers
+from tests.test_nanshe.test_util import test_xglob
+from tests.test_nanshe.test_util import testPathHelpers
diff --git a/tests/test_nanshe/test_util/test_prof.py b/tests/test_nanshe/test_util/test_prof.py
index d2cc5173..c465b505 100644
--- a/tests/test_nanshe/test_util/test_prof.py
+++ b/tests/test_nanshe/test_util/test_prof.py
@@ -5,7 +5,11 @@
 __date__ = "$Jul 30, 2014 16:57:43 EDT$"
 
 
-import StringIO
+try:
+    from StringIO import StringIO
+except ImportError:
+    from io import StringIO
+
 import logging
 import re
 import sys
@@ -16,7 +20,7 @@
 
 class TestProf(object):
     def setup(self):
-        self.stream = StringIO.StringIO()
+        self.stream = StringIO()
 
         self.handler = logging.StreamHandler(self.stream)
         self.handler.setFormatter(logging.Formatter(logging.BASIC_FORMAT))
@@ -199,7 +203,7 @@ def test_log_call_7(self):
         def test(a, b=5):
             return(a + b)
 
-        expected_traceback = StringIO.StringIO()
+        expected_traceback = StringIO()
 
         try:
             test("c")
@@ -258,6 +262,7 @@ def __call__(self):
         self.handler.flush()
         result_2 = self.stream.getvalue()
         self.stream.truncate(0)
+        result_2 = result_2.strip("\0")
 
         print(result_2)
 
@@ -301,6 +306,7 @@ def __call__(self):
         self.handler.flush()
         result_2 = self.stream.getvalue()
         self.stream.truncate(0)
+        result_2 = result_2.strip("\0")
 
         print(result_2)
 
diff --git a/tests/test_nanshe/test_util/test_wrappers.py b/tests/test_nanshe/test_util/test_wrappers.py
index 4242215a..0f026ae7 100644
--- a/tests/test_nanshe/test_util/test_wrappers.py
+++ b/tests/test_nanshe/test_util/test_wrappers.py
@@ -6,6 +6,7 @@
 
 import functools
 
+import sys
 
 import PyQt4
 import PyQt4.QtCore
@@ -211,7 +212,12 @@ def __init__(self):
         assert ClassWrapped.__init__ != Class.__init__
         assert not hasattr(Class.__init__, "__wrapped__")
         assert hasattr(ClassWrapped.__init__, "__wrapped__")
-        assert ClassWrapped.__init__.__wrapped__ != Class.__init__
+
+        if sys.version_info.major < 3:
+            assert ClassWrapped.__init__.__wrapped__ != Class.__init__
+        else:
+            assert ClassWrapped.__init__.__wrapped__ == Class.__init__
+
         assert ClassWrapped.__wrapped__.__init__ == Class.__init__
 
@@ -232,7 +238,12 @@ def __init__(self):
         assert ClassWrapped.__init__ != Class.__init__
         assert not hasattr(Class.__init__, "__wrapped__")
         assert hasattr(ClassWrapped.__init__, "__wrapped__")
-        assert ClassWrapped.__init__.__wrapped__ != Class.__init__
+
+        if sys.version_info.major < 3:
+            assert ClassWrapped.__init__.__wrapped__ != Class.__init__
+        else:
+            assert ClassWrapped.__init__.__wrapped__ == Class.__init__
+
         assert ClassWrapped.__wrapped__.__init__ == Class.__init__
 
@@ -261,7 +272,12 @@ def func_0(self):
         assert ClassWrapped.func_0 != Class.func_0
         assert not hasattr(Class.func_0, "__wrapped__")
         assert hasattr(ClassWrapped.func_0, "__wrapped__")
-        assert ClassWrapped.func_0.__wrapped__ != Class.func_0
+
+        if sys.version_info.major < 3:
+            assert ClassWrapped.func_0.__wrapped__ != Class.func_0
+        else:
+            assert ClassWrapped.func_0.__wrapped__ == Class.func_0
+
         assert ClassWrapped.__wrapped__.func_0 == Class.func_0
 
     def test_unwrap(self):
@@ -277,48 +293,49 @@ def func_0():
 
     def test_tied_call_args(self):
         def func_0(a, b=5, *v, **k):
-            return(a + b + sum(v) + sum(k.values()))
+            return(a + b + sum(v) + sum(list(k.values())))
 
         tied_args, args, kwargs = nanshe.util.wrappers.tied_call_args(
             func_0, 1
         )
-        assert tied_args.items() == [("a", 1), ("b", 5)]
+        assert list(tied_args.items()) == [("a", 1), ("b", 5)]
         assert args == tuple()
-        assert kwargs.items() == []
+        assert list(kwargs.items()) == []
 
         tied_args, args, kwargs = nanshe.util.wrappers.tied_call_args(
             func_0, a=1, c=7
         )
-        assert tied_args.items() == [("a", 1), ("b", 5)]
+        assert list(tied_args.items()) == [("a", 1), ("b", 5)]
         assert args == tuple()
-        assert kwargs.items() == [("c", 7)]
+        assert list(kwargs.items()) == [("c", 7)]
 
         tied_args, args, kwargs = nanshe.util.wrappers.tied_call_args(
             func_0, 1, 2, 3, c=7
        )
-        assert tied_args.items() == [("a", 1), ("b", 2)]
+        assert list(tied_args.items()) == [("a", 1), ("b", 2)]
         assert args == (3,)
-        assert kwargs.items() == [("c", 7)]
+        assert list(kwargs.items()) == [("c", 7)]
 
     def test_repack_call_args(self):
         def func_0(a, b=5, *v, **k):
-            return(a + b + sum(v) + sum(k.values()))
+            return(a + b + sum(v) + sum(list(k.values())))
 
         args, kwargs = nanshe.util.wrappers.repack_call_args(func_0, 1)
         assert args == (1,)
-        assert kwargs.items() == [("b", 5)]
+        assert list(kwargs.items()) == [("b", 5)]
 
         args, kwargs = nanshe.util.wrappers.repack_call_args(
             func_0, a=1, c=7
         )
+
         assert args == tuple()
-        assert kwargs.items() == [("a", 1), ("c", 7), ("b", 5)]
+        assert sorted(kwargs.items()) == [("a", 1), ("b", 5), ("c", 7)]
 
         args, kwargs = nanshe.util.wrappers.repack_call_args(
             func_0, 1, 2, 3, c=7
         )
         assert args == (1, 2, 3)
-        assert kwargs.items() == [("c", 7)]
+        assert list(kwargs.items()) == [("c", 7)]
 
 
 def setup_with_setup_state_2(a_callable):
diff --git a/tests/test_nanshe/test_util/test_xglob.py b/tests/test_nanshe/test_util/test_xglob.py
index 02de3a88..275a3c6f 100644
--- a/tests/test_nanshe/test_util/test_xglob.py
+++ b/tests/test_nanshe/test_util/test_xglob.py
@@ -5,6 +5,12 @@
 
 import nanshe.util.xglob
 
 
+try:
+    xrange
+except NameError:
+    xrange = range
+
+
 class TestXGlob(object):
     num_files = 10
 
@@ -14,21 +20,26 @@ def setup(self):
         self.temp_dir = tempfile.mkdtemp()
         self.temp_files = []
 
+        temp_files_dict = dict()
         for i in xrange(TestXGlob.num_files):
-            self.temp_files.append(tempfile.NamedTemporaryFile(suffix=".tif", dir=self.temp_dir))
+            each_tempfile = tempfile.NamedTemporaryFile(
+                suffix=".tif", dir=self.temp_dir
+            )
+            temp_files_dict[each_tempfile.name] = each_tempfile
 
-        self.temp_files.sort(cmp=lambda a, b: 2*(a.name > b.name) - 1)
+        for each_filename in sorted(temp_files_dict.keys()):
+            self.temp_files.append(temp_files_dict[each_filename])
 
     def test_expand_pathname_list(self):
-        import itertools
+        import nanshe.util.iters
 
         matched_filenames = nanshe.util.xglob.expand_pathname_list(self.temp_dir + "/*.tif")
-        matched_filenames.sort(cmp=lambda a, b: 2*(a > b) - 1)
+        matched_filenames = sorted(matched_filenames)
 
         assert (len(matched_filenames) == len(self.temp_files))
 
-        for each_l, each_f in itertools.izip(matched_filenames, self.temp_files):
+        for each_l, each_f in nanshe.util.iters.izip(matched_filenames, self.temp_files):
             assert (each_l == each_f.name)
 
     def teardown(self):
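A note on the recurring idiom: every test module touched above adopts the same Python 2/3 compatibility shim, probing a Python 2 builtin at module scope and rebinding it to its Python 3 replacement when the probe fails. A minimal, self-contained sketch of the pattern (the names are the ones the diff actually rebinds; the usage lines at the end are illustrative only, not from nanshe):

    # Probe the Python 2 name; rebind to the Python 3 equivalent if absent.
    try:
        xrange          # Python 2: lazy integer range
    except NameError:
        xrange = range  # Python 3: range is already lazy

    try:
        unicode         # Python 2 text type
    except NameError:
        unicode = str   # Python 3: str is the text type

    try:
        from StringIO import StringIO  # Python 2 module
    except ImportError:
        from io import StringIO       # Python 3 location

    # Illustrative use; identical under either interpreter.
    buf = StringIO()
    for i in xrange(3):
        buf.write(unicode(i))
    assert buf.getvalue() == u"012"

The shim keeps each test file importable on both interpreters without a runtime dependency such as six, which is consistent with the diff's removal of pinned Python-2-only requirements from setup.py.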
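Similarly, the diff routes every itertools.izip and xrange call through nanshe.util.iters.izip and nanshe.util.iters.irange. The helpers' actual definitions are not part of this diff; a plausible minimal version of such helpers (an assumption for illustration, not nanshe's real code) simply aliases whichever lazy builtin the running interpreter provides:

    # Hypothetical sketch of izip/irange helpers; nanshe.util.iters'
    # real implementation is not shown in this diff.
    import itertools
    import sys

    if sys.version_info[0] >= 3:
        izip = zip      # Python 3: builtin zip is lazy
        irange = range  # Python 3: builtin range is lazy
    else:
        izip = itertools.izip  # Python 2: lazy zip
        irange = xrange        # Python 2: lazy range

Centralizing the aliases in one utility module is what lets the rest of the changes above be pure call-site renames rather than per-file shims.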