Paruch Group / Hystorian / Commits

Commit b90a0943 authored Jun 07, 2021 by Christian Weymann
Add the necessary functions to twodim.
parent 770e8858
Changes 2
DomainStatsWorkflow.ipynb @ b90a0943
...
...
@@ -9,14 +9,14 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 12,
+   "execution_count": 1,
    "metadata": {},
    "outputs": [],
    "source": [
     "import numpy as np\n",
     "import matplotlib.pyplot as plt\n",
     "\n",
-    "import hystorian as ms\n",
+    "import hystorian as hy\n",
     "import h5py\n",
     "\n",
     "import os\n",
...
...
@@ -34,26 +34,21 @@
   },
   {
    "cell_type": "code",
-   "execution_count": 14,
+   "execution_count": 2,
    "metadata": {},
    "outputs": [
     {
      "name": "stdout",
      "output_type": "stream",
      "text": [
       "Warning: 'data/to_convert/m21003b_topo_MC_009.sxm' as impossible to convert. File has been ignored.\n",
       "0/1 files merged into 'data\m21003b.hdf5'\n",
       "Warning: 'data/to_convert/m21006DSOI2_topo_MC_005.sxm' as impossible to convert. File has been ignored.\n",
       "0/1 files merged into 'data\m21006DSOI2.hdf5'\n",
       "Warning: 'data/to_convert/m21007b_topo_MC_003.sxm' as impossible to convert. File has been ignored.\n",
       "0/1 files merged into 'data\m21007b.hdf5'\n",
       "Warning: 'data/to_convert/old/m21007DSOC3_topo_MC_005.sxm' as impossible to convert. File has been ignored.\n",
       "0/1 files merged into 'data\m21007DSOC3.hdf5'\n",
       "The following measurements were converted previously and have been ignored:\n",
       "m21003b_topo_MC_009\n",
       "m21004DSOJ4_topo_MC_005\n",
       "m21004DSOJ4_topo_MC_008\n",
       "m21005b_topo_MC_006\n",
       "m21005b_topo_MC_009\n",
       "m21006DSOI2_topo_MC_005\n",
       "m21007b_topo_MC_003\n",
       "m20036_topo_MC_006\n",
       "m20036_topo_MC_007\n",
       "m20036_topo_MC_008\n",
...
...
@@ -70,21 +65,18 @@
       "m21003DSOJ2_topo_MC_009\n",
       "m21005DSOF2_topo_MC_005\n",
       "m21005DSOF2_topo_MC_008\n",
       "m21007DSOC3_topo_MC_005\n",
       "\n",
       "\n",
       "The following files were skipped:\n",
       "data\\to_convert\\\n",
       "data\\to_convert\\m21003b_topo_MC_009.hdf5\n",
       "data\\to_convert\\m21006DSOI2_topo_MC_005.hdf5\n",
       "data\\to_convert\\m21007b_topo_MC_003.hdf5\n",
       "data\\to_convert\\old\n",
       "data\\to_convert\\old\\m21007DSOC3_topo_MC_005.hdf5\n",
       "If any of these files should have been included, check the allowed extensions.\n"
      ]
     }
    ],
    "source": [
-    "allowed_extensions = set(('.idw', '.sxm'))\n",
+    "allowed_extensions = set(('.ibw', '.sxm'))\n",
     "raw_data_path = os.path.join('data', 'to_convert')\n",
     "data_path = 'data'\n",
     "skipped_list = []\n",
...
...
@@ -114,7 +106,7 @@
" to_convert.setdefault(combined_name, []).append(fn.replace('\\\\', '/')) \n",
" \n",
"for combined_name, filelist in to_convert.items():\n",
"
ms
.io.read_file.merge_hdf5(filelist, combined_name)\n",
"
hy
.io.read_file.merge_hdf5(filelist, combined_name)\n",
"\n",
"for combined_name in to_convert:\n",
" sample_name = os.path.basename(combined_name)\n",
...
...
%% Cell type:markdown id: tags:
General purpose imports
%% Cell type:code id: tags:
``` python
import numpy as np
import matplotlib.pyplot as plt

-import hystorian as ms
+import hystorian as hy

import h5py

import os
from glob import glob
from IPython.display import clear_output
```
%% Cell type:markdown id: tags:
Next, we load all the data in `raw_data_path` into the `.hdf5` format and store it in `data_path`. We make one `.hdf5` file per sample, assuming that each raw data filename starts with the sample name followed by an underscore.
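For instance, under that naming assumption, the measurement file `m21003b_topo_MC_009.sxm` (one of the files that appears in the output below) belongs to sample `m21003b` and ends up in `data/m21003b.hdf5`. A minimal sketch of that mapping, using an illustrative path; the full loop in the next cell does the real work:

``` python
import os

# Hypothetical illustration of the naming assumption:
# everything before the first underscore is taken as the sample name.
fn = 'data/to_convert/m21003b_topo_MC_009.sxm'
measurement_name = os.path.splitext(os.path.basename(fn))[0]  # 'm21003b_topo_MC_009'
sample_name = measurement_name.split('_')[0]                  # 'm21003b'
combined_name = os.path.join('data', sample_name)             # merged file will be combined_name + '.hdf5'
```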
%% Cell type:code id: tags:
``` python
-allowed_extensions = set(('.idw', '.sxm'))
+allowed_extensions = set(('.ibw', '.sxm'))
raw_data_path = os.path.join('data', 'to_convert')
data_path = 'data'
skipped_list = []
already_converted = []
to_convert = {}

for fn in glob(os.path.join(raw_data_path, '**'), recursive=True):
    #skip anything that doesn't have an allowed extension
    if os.path.splitext(fn)[1] not in allowed_extensions:
        skipped_list.append(fn)
        continue
    measurement_name = os.path.splitext(os.path.basename(fn))[0]
    sample_name = measurement_name.split('_')[0]
    combined_name = os.path.join(data_path, sample_name)
    #check if the sample file exists
    if os.path.isfile(combined_name + '.hdf5'):
        with h5py.File(combined_name + '.hdf5', 'r') as f:
            #check if the measurement is already there
            if f'datasets/{measurement_name}' in f:
                already_converted.append(measurement_name)
                continue
    #Since the measurement is not there, add it to the list to convert
    to_convert.setdefault(combined_name, []).append(fn.replace('\\', '/'))

for combined_name, filelist in to_convert.items():
-    ms.io.read_file.merge_hdf5(filelist, combined_name)
+    hy.io.read_file.merge_hdf5(filelist, combined_name)

for combined_name in to_convert:
    sample_name = os.path.basename(combined_name)
    with h5py.File(combined_name + '.hdf5', 'r+') as f:
        #check if sample thickness is given, ask for it if not
        if f['datasets'].attrs.get('Thickness_uc') is None:
            try:
                f['datasets'].attrs.create('Thickness_uc', int(input(f'What is the thickness of {sample_name} in uc?')))
            except ValueError:
                pass

print('The following measurements were converted previously and have been ignored:')
for mn in already_converted:
    print(mn)
print('\n')
print('The following files were skipped:')
for fn in skipped_list:
    print(fn)
print('If any of these files should have been included, check the allowed extensions.')
```
%%%% Output: stream
Warning: 'data/to_convert/m21003b_topo_MC_009.sxm' as impossible to convert. File has been ignored.
0/1 files merged into 'data\m21003b.hdf5'
Warning: 'data/to_convert/m21006DSOI2_topo_MC_005.sxm' as impossible to convert. File has been ignored.
0/1 files merged into 'data\m21006DSOI2.hdf5'
Warning: 'data/to_convert/m21007b_topo_MC_003.sxm' as impossible to convert. File has been ignored.
0/1 files merged into 'data\m21007b.hdf5'
Warning: 'data/to_convert/old/m21007DSOC3_topo_MC_005.sxm' as impossible to convert. File has been ignored.
0/1 files merged into 'data\m21007DSOC3.hdf5'
The following measurements were converted previously and have been ignored:
m21003b_topo_MC_009
m21004DSOJ4_topo_MC_005
m21004DSOJ4_topo_MC_008
m21005b_topo_MC_006
m21005b_topo_MC_009
m21006DSOI2_topo_MC_005
m21007b_topo_MC_003
m20036_topo_MC_006
m20036_topo_MC_007
m20036_topo_MC_008
m20037_topo_MC_003
m20037_topo_MC_004
m20038_topo_MC_006
m20038_topo_MC_007
m20038_topo_MC_010
m20039_topo_MC_010
m20039_topo_MC_011
m20041_topo_MC_006
m20041_topo_MC_007
m21003DSOJ2_topo_MC_006
m21003DSOJ2_topo_MC_009
m21005DSOF2_topo_MC_005
m21005DSOF2_topo_MC_008
m21007DSOC3_topo_MC_005
The following files were skipped:
data\to_convert\
data\to_convert\m21003b_topo_MC_009.hdf5
data\to_convert\m21006DSOI2_topo_MC_005.hdf5
data\to_convert\m21007b_topo_MC_003.hdf5
data\to_convert\old
data\to_convert\old\m21007DSOC3_topo_MC_005.hdf5
If any of these files should have been included, check the allowed extensions.
%% Cell type:code id: tags:
``` python
for fn in glob('data/*'):
    print(fn.replace('\\', '/'))
```
%%%% Output: stream
data/Celine
data/LoopsCeline-color.png
data/m20036.hdf5
data/m20037.hdf5
data/m20038.hdf5
data/m20039.hdf5
data/m20041.hdf5
data/m21003b.hdf5
data/m21003DSOJ2.hdf5
data/m21004DSOJ4.hdf5
data/m21005b.hdf5
data/m21005b_topo_MC_006.sxm
data/m21005DSOF2.hdf5
data/m21006DSOI2.hdf5
data/m21007b.hdf5
data/m21007DSOC3.hdf5
data/test.txt
data/to_convert
%% Cell type:code id: tags:
``` python
with h5py.File('data/m20038.hdf5', 'r') as f:
    for key in f['datasets']:
        print(key)
```
%%%% Output: stream
m20038_topo_MC_006
m20038_topo_MC_007
m20038_topo_MC_010
%% Cell type:code id: tags:
``` python
```
...
...
hystorian/processing/twodim.py @ b90a0943
...
...
@@ -9,7 +9,7 @@ from scipy.signal import medfilt, cspline2d
from scipy.optimize import curve_fit
from scipy.ndimage.morphology import binary_erosion, binary_dilation
from scipy.ndimage.measurements import label
-from scipy import interpolate, ndimage
+from scipy import interpolate, ndimage, signal
from skimage.morphology import skeletonize
from skimage import img_as_ubyte
...
...
@@ -3629,4 +3629,123 @@ def find_grown_element(contour_init, contour_end, path1, path2, img_contour_end)
    data = (img_bin).astype(np.uint8)
    (cnts, img) = cv2.findContours(data, cv2.RETR_TREE, cv2.CHAIN_APPROX_NONE)
    return cnts

def radial_average(data, angle=None, tol=np.pi/18):
    '''
    Compute the radial average of an image

    Parameters
    ----------
    data : 2d array-like
    angle : if None, the average is over all directions
        else, indicates the direction in which to do the average, as an angle in radians
    tol : if an angle is given, the average will be done between angle-tol and angle+tol

    Returns
    -------
    index : 1d array of the distances from the center, in pixels
    mean : 1d array of the same size as index with the corresponding averages
    '''
    #find the distance of each pixel to the center
    sx, sy = data.shape
    X, Y = np.ogrid[0:sx, 0:sy]
    R = np.hypot(X - sx/2, Y - sy/2)
    #round to closest pixel and extract unique values
    labels = R.astype(np.int)
    index = np.unique(labels)
    #if angle is specified, mask the data to only include data in the slice defined by angle +/- tol
    if angle is not None:
        mask = np.logical_and(np.arctan2(X - sx/2, Y - sy/2) < (angle + tol),
                              np.arctan2(X - sx/2, Y - sy/2) > (angle - tol))
        return index, ndimage.mean(data[mask], labels=labels[mask], index=index)
    return index, ndimage.mean(data, labels=labels, index=index)

def projected_average(data, angle=0, symmetrical=True):
    '''
    Project an image along an axis passing through its center

    Parameters
    ----------
    data : 2d array-like
    angle : the direction of the axis on which to project, as an angle in radians
    symmetrical : if True, the projection is done symmetrically around the center of the image
        else, it is done from one edge of the image to the other

    Returns
    -------
    index : 1d array of the distances along the projection axis from the center, in pixels
    mean : 1d array of the same size as index with the corresponding averages
    '''
    #Calculate the projection of each point on the unit vector in the direction of angle
    sx, sy = data.shape
    X, Y = np.ogrid[0:sx, 0:sy]
    P = (X - sx/2)*np.cos(angle) + (Y - sy/2)*np.sin(angle)
    #round to closest pixel, in absolute value if symmetrical is True
    if symmetrical:
        labels = np.abs(P.round().astype(np.int))
    else:
        labels = P.round().astype(np.int)
    index = np.unique(labels)
    return index, ndimage.mean(data, labels=labels, index=index)

def gaussian_blur(data, r=1, padding='wrap'):
    '''
    Blur an image using a Gaussian kernel

    Parameters
    ----------
    data : 2d array-like
    r : the standard deviation of the kernel
    padding : passed to np.pad(), determines how the image is padded to avoid edge effects

    Returns
    -------
    The blurred image, same shape as the original
    '''
    if r == 0:
        #no blurring
        return data
    else:
        l = len(data[0])
        padded = np.pad(data, l, padding)  #pad each direction the size of the image
        k = np.outer(signal.gaussian(6*r, r), signal.gaussian(6*r, r))
        return signal.fftconvolve(padded, k, mode='same')[l:2*l, l:2*l] / k.sum()

def autocorrelate(data, mode='full', padding=None, normalise=True):
    '''
    Compute the 2D autocorrelation of an image

    Parameters
    ----------
    data : 2d array-like
    mode : 'full' or 'same'. 'full' returns the full autocorrelation, which is twice as
        large in every dimension as the original image. 'same' returns the central region
        of the same dimension as the input data
    padding : None or valid mode for np.pad(). If not None, determines how the image is
        padded to avoid edge effects
    normalise : boolean, whether to normalise the data to z-scores before computing the autocorrelation

    Returns
    -------
    The 2D autocorrelation
    '''
    if normalise:
        normalised = (data - data.mean()) / data.std()
    else:
        normalised = data
    if padding is not None:
        l = len(data[0])
        padded = np.pad(normalised, l, padding)
        return signal.fftconvolve(normalised, padded[::-1, ::-1], mode='same') / data.size
    return signal.fftconvolve(normalised, normalised[::-1, ::-1], mode=mode) / data.size
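As a rough usage sketch (not part of the commit), the four new functions can be called directly on a 2D NumPy array. The import path is assumed from the module location `hystorian/processing/twodim.py`, and `img` is a placeholder image:

``` python
import numpy as np
from hystorian.processing import twodim  # assumed import path for this module

img = np.random.rand(256, 256)           # placeholder 2D image

# isotropic radial average, and a directional one restricted to angle +/- tol
r_index, r_mean = twodim.radial_average(img)
rx_index, rx_mean = twodim.radial_average(img, angle=0, tol=np.pi/18)

# projection onto an axis through the image center, at 45 degrees
p_index, p_mean = twodim.projected_average(img, angle=np.pi/4)

# Gaussian smoothing with sigma = 3 px, and a normalised 2D autocorrelation
smoothed = twodim.gaussian_blur(img, r=3)
acf = twodim.autocorrelate(img, mode='same')
```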