From fb9148516c50ffb2225914b4ab99a98f39ebcfad Mon Sep 17 00:00:00 2001 From: James Ball Date: Sat, 27 Apr 2024 11:44:11 +0200 Subject: [PATCH 1/6] Add Astra support for grain shape reconstruction --- .../S3DXRD/2_S3DXRD_sinograms_map_astra.ipynb | 950 +++++++++++++++++ ...DXRD_sinograms_map_minor_phase_astra.ipynb | 997 ++++++++++++++++++ ImageD11/sinograms/sinogram.py | 178 +++- 3 files changed, 2121 insertions(+), 4 deletions(-) create mode 100644 ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map_astra.ipynb create mode 100644 ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map_minor_phase_astra.ipynb diff --git a/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map_astra.ipynb b/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map_astra.ipynb new file mode 100644 index 00000000..591b480d --- /dev/null +++ b/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map_astra.ipynb @@ -0,0 +1,950 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Jupyter notebook based on ImageD11 to process scanning 3DXRD data\n", + "# Written by Haixing Fang, Jon Wright and James Ball\n", + "## Date: 28/03/2024" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# USER: Change the path below to point to your local copy of ImageD11:\n", + "\n", + "import os\n", + "\n", + "home_dir = !echo $HOME\n", + "home_dir = str(home_dir[0])\n", + "\n", + "# USER: You can change this location if you want\n", + "\n", + "id11_code_path = os.path.join(home_dir, \"Code/ImageD11\")\n", + "\n", + "import sys\n", + "\n", + "sys.path.insert(0, id11_code_path)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# import functions we need\n", + "\n", + "import concurrent.futures\n", + "\n", + "%matplotlib ipympl\n", + "\n", + "import h5py\n", + "from tqdm.notebook import tqdm\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import ImageD11.columnfile\n", + "import ImageD11.sinograms.dataset\n", + "from ImageD11.grain import grain\n", + "from ImageD11.peakselect import select_ring_peaks_by_intensity\n", + "from ImageD11.sinograms.sinogram import GrainSinogram, build_slice_arrays, write_slice_recon, write_h5, get_2d_peaks_from_4d_peaks, run_astra\n", + "from ImageD11.sinograms.roi_iradon import run_iradon\n", + "\n", + "from skimage.filters import threshold_otsu\n", + "from skimage.morphology import convex_hull_image\n", + "\n", + "import ImageD11.nbGui.nb_utils as utils\n", + "\n", + "import ipywidgets as widgets\n", + "from ipywidgets import interact" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# NOTE: For old datasets before the new directory layout structure, we don't distinguish between RAW_DATA and PROCESSED_DATA\n", + "\n", + "### USER: specify your experimental directory\n", + "\n", + "rawdata_path = \"/data/visitor/ihma439/id11/20231211/RAW_DATA\"\n", + "\n", + "!ls -lrt {rawdata_path}\n", + "\n", + "### USER: specify where you want your processed data to go\n", + "\n", + "processed_data_root_dir = \"/data/visitor/ihma439/id11/20231211/PROCESSED_DATA/James/nb_testing\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# USER: pick a sample and a dataset you want to segment\n", + "\n", + "sample = \"FeAu_0p5_tR_nscope\"\n", + "dataset = \"top_100um\"" + ] + }, + { + "cell_type": "code", + 
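For reference, the two "!echo $HOME" lines in the first cell above rely on IPython shell magic; a plain-Python equivalent (useful if the cell is ever run outside Jupyter) is:

import os

# plain-Python equivalent of the IPython "!echo $HOME" trick used above
home_dir = os.path.expanduser("~")
id11_code_path = os.path.join(home_dir, "Code/ImageD11")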
"execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# desination of H5 files\n", + "\n", + "dset_path = os.path.join(processed_data_root_dir, sample, f\"{sample}_{dataset}\", f\"{sample}_{dataset}_dataset.h5\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Load the dataset\n", + "ds = ImageD11.sinograms.dataset.load(dset_path)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# If the sinograms are only half-sinograms (we scanned dty across half the sample rather than the full sample), set the below to true:\n", + "is_half_scan = False" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if is_half_scan:\n", + " ds.correct_bins_for_half_scan()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Import 4D peaks\n", + "\n", + "cf_4d = ds.get_cf_4d_from_disk()\n", + "\n", + "cf_4d.parameters.loadparameters(ds.parfile)\n", + "cf_4d.updateGeometry()\n", + "\n", + "print(f\"Read {cf_4d.nrows} 4D peaks\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# here we are filtering our peaks (cf_4d) to select only the strongest ones\n", + "# this time as opposed to indexing, our frac is slightly weaker but we are NOT filtering in dstar!!!!!\n", + "# this means many more peaks per grain = stronger sinograms\n", + "\n", + "# USER: modify the \"frac\" parameter below and re-run the cell until the orange dot sits nicely on the \"elbow\" of the blue line\n", + "# this indicates the fractional intensity cutoff we will select\n", + "# if the blue line does not look elbow-shaped in the logscale plot, try changing the \"doplot\" parameter (the y scale of the logscale plot) until it does\n", + "\n", + "cf_strong_frac = 0.995\n", + "cf_strong_dstol = 0.005\n", + "\n", + "cf_strong = select_ring_peaks_by_intensity(cf_4d, frac=cf_strong_frac, dstol=cf_strong_dstol, dsmax=cf_4d.ds.max(), doplot=0.9)\n", + "print(cf_4d.nrows)\n", + "cf_strong.nrows" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# import the grains from disk\n", + "\n", + "grains = ds.get_grains_from_disk()\n", + "print(f\"{len(grains)} grains imported\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# assign peaks to the grains\n", + "\n", + "peak_assign_tol = 0.25\n", + "utils.assign_peaks_to_grains(grains, cf_strong, peak_assign_tol)\n", + "\n", + "for grain_label, g in enumerate(grains):\n", + " g.npks_4d = np.sum(cf_strong.grain_id == grain_label)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# let's make a GrainSinogram object for each grain\n", + "\n", + "grainsinos = [GrainSinogram(g, ds) for g in grains]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Now let's determine the positions of each grain from the 4D peaks\n", + "\n", + "for grain_label, gs in enumerate(grainsinos):\n", + " gs.update_lab_position_from_peaks(cf_strong, grain_label)" + ] + }, + { + "cell_type": 
"code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# We can also determine the RGB IPF colours of the grains which will be useful for plotting\n", + "# To do this, we first need to set a reference unitcell for each grain\n", + "# This will be used to determine the Orix Phase and therefore Orix Orientation\n", + "\n", + "cf_pars = cf_strong.parameters.get_parameters()\n", + "spacegroup = 229 # spacegroup for BCC iron\n", + "cf_pars[\"cell_lattice_[P,A,B,C,I,F,R]\"] = spacegroup\n", + "\n", + "ref_ucell = ImageD11.unitcell.unitcell_from_parameters(cf_pars)\n", + "\n", + "for g in grains:\n", + " g.ref_unitcell = ref_ucell\n", + "\n", + "# Now colours should work\n", + "\n", + "utils.get_rgbs_for_grains(grains)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "utils.plot_all_ipfs(grains)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Now we can plot our grain positions and RGB colours:\n", + "\n", + "# plt.style.use('dark_background')\n", + "fig, ax = plt.subplots(2,2, figsize=(12,12))\n", + "a = ax.ravel()\n", + "x = [g.translation[0] for g in grains]\n", + "y = [g.translation[1] for g in grains]\n", + "s = [g.npks_4d/10 for g in grains]\n", + "a[0].scatter(y, x, c=[g.rgb_z for g in grains], s=s)\n", + "a[0].set(title='IPF color Z', aspect='equal')\n", + "a[1].scatter(y, x, c=[g.rgb_y for g in grains], s=s)\n", + "a[1].set(title='IPF color Y', aspect='equal')\n", + "a[2].scatter(y, x, c=[g.rgb_x for g in grains], s=s)\n", + "a[2].set(title='IPF color X', aspect='equal')\n", + "a[3].scatter(y, x, c=s)\n", + "a[3].set(title='Number of 4D peaks', aspect='equal')\n", + "\n", + "fig.supxlabel(\"<- Lab y (transverse)\")\n", + "fig.supylabel(\"Lab x (beam) ->\")\n", + "\n", + "for a in ax.ravel():\n", + " a.invert_xaxis()\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# we need to determine what the value of dty is where the rotation axis intercepts the beam\n", + "# we'll call this y0\n", + "# should be the result of the centre-of-mass fit\n", + "\n", + "fig, ax = plt.subplots()\n", + "\n", + "sample_y0s = [gs.recon_y0 for gs in grainsinos]\n", + "\n", + "ax.plot(sample_y0s)\n", + "\n", + "plt.show()\n", + "\n", + "y0 = np.median(sample_y0s)\n", + "\n", + "print('y0 is', y0)\n", + "\n", + "# update the y0 for each grain with the median y0:\n", + "\n", + "for gs in grainsinos:\n", + " gs.update_recon_parameters(y0=y0)\n", + "\n", + "# the shift we have to apply to the reconstructions is equal to -y0/ystep (in integer coords)\n", + "\n", + "shift = -y0/ds.ystep\n", + "\n", + "print('shift is', shift)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# now let's do a whole-sample tomographic reconstruction\n", + "# generate sinogram for whole sample\n", + "\n", + "whole_sample_sino, xedges, yedges = np.histogram2d(cf_4d.dty, cf_4d.omega, bins=[ds.ybinedges, ds.obinedges])\n", + "\n", + "fig, ax = plt.subplots()\n", + "ax.imshow(whole_sample_sino, interpolation=\"nearest\", vmin=0)\n", + "ax.set_aspect(4)\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# \"quick\" whole-sample reconstruction\n", 
+ "\n", + "pad = 50\n", + "\n", + "whole_sample_recon = run_astra(whole_sample_sino, ds.obincens, pad=pad, shift=shift, astra_method=\"FBP_CUDA\", niter=100)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# without a mask, MLEM can introduce artifacts in the corners\n", + "# so we can manually mask those out\n", + "\n", + "# we can incoporate our own mask too\n", + "# by modifying the below function\n", + "\n", + "def apply_manual_mask(mask_in):\n", + " mask_out = mask_in.copy()\n", + " \n", + " # mask_out[200:, 250:] = 0\n", + " \n", + " return mask_out\n", + "\n", + "# we should be able to easily segment this using scikit-image\n", + "recon_man_mask = apply_manual_mask(whole_sample_recon)\n", + "\n", + "# we can also override the threshold if we don't like it:\n", + "# manual_threshold = 0.05\n", + "manual_threshold = None\n", + "\n", + "if manual_threshold is None:\n", + " thresh = threshold_otsu(recon_man_mask)\n", + "else:\n", + " thresh = manual_threshold\n", + "\n", + "binary = recon_man_mask > thresh\n", + "\n", + "chull = convex_hull_image(binary)\n", + "\n", + "whole_sample_mask = chull\n", + "\n", + "fig, axs = plt.subplots(1, 3, sharex=True, sharey=True, constrained_layout=True)\n", + "axs[0].imshow(recon_man_mask, vmin=0, origin=\"lower\")\n", + "axs[1].imshow(binary, origin=\"lower\")\n", + "axs[2].imshow(chull, origin=\"lower\")\n", + "\n", + "axs[0].set_title(\"Reconstruction\")\n", + "axs[1].set_title(\"Binarised threshold\")\n", + "axs[2].set_title(\"Convex hull\")\n", + "\n", + "fig.supxlabel(\"<-- Y axis\")\n", + "fig.supylabel(\"Beam >\")\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# now we have a whole-sample reconstruction we can use as a sample mask\n", + "# let's build the sinograms for our grains\n", + "# before we do this, we need to determine our 2D peaks that will be used for the sinogram\n", + "# here we can get them from the 4D peaks:\n", + "\n", + "hkltol = 0.25\n", + "\n", + "gord, inds = get_2d_peaks_from_4d_peaks(ds.pk2d, cf_strong)\n", + "\n", + "for grain_label, gs in enumerate(tqdm(grainsinos)):\n", + " gs.prepare_peaks_from_4d(cf_strong, gord, inds, grain_label, hkltol)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# now we can actually generate the sinograms\n", + "\n", + "for gs in tqdm(grainsinos):\n", + " gs.build_sinogram()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# optionally correct the halfmask:\n", + "\n", + "if is_half_scan:\n", + " for gs in grainsinos:\n", + " gs.correct_halfmask()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Show sinogram of single grain\n", + "\n", + "gs = grainsinos[0]\n", + "\n", + "fig, ax = plt.subplots()\n", + "\n", + "ax.imshow(gs.ssino, aspect='auto')\n", + "ax.set_title(\"ssino\")\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# We can optionally correct each row of the sinogram by the ring current of that rotation\n", + "# This helps remove artifacts in 
the reconstruction\n", + "\n", + "correct_sinos_with_ring_current = True\n", + "if correct_sinos_with_ring_current:\n", + " ds.get_ring_current_per_scan()\n", + " \n", + " for gs in grainsinos:\n", + " gs.correct_ring_current(is_half_scan=is_half_scan)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Show sinogram of single grain\n", + "\n", + "gs = grainsinos[0]\n", + "\n", + "fig, ax = plt.subplots()\n", + "\n", + "ax.imshow(gs.ssino, aspect='auto')\n", + "ax.set_title(\"ssino\")\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# go straight to ASTRA\n", + "\n", + "gs = grainsinos[0]\n", + "\n", + "# update the parameters used for the iradon reconstruction\n", + "\n", + "niter = 500\n", + "\n", + "gs.update_recon_parameters(pad=pad, shift=shift, y0=y0, niter=niter, mask=whole_sample_mask)\n", + "\n", + "gs.recon(method=\"astra\", astra_method=\"EM_CUDA\")\n", + "\n", + "if is_half_scan:\n", + " halfmask_radius = 25\n", + " gs.mask_central_zingers(\"iradon\", radius=halfmask_radius)\n", + "\n", + "# view the result\n", + "\n", + "fig, axs = plt.subplots(1,2, figsize=(10,5))\n", + "axs[0].imshow(gs.ssino, aspect='auto')\n", + "axs[0].set_title(\"ssino\")\n", + "axs[1].imshow(gs.recons[\"astra\"], vmin=0, origin=\"lower\")\n", + "axs[1].set_title(\"Astra\")\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# once you're happy with the reconstruction parameters used, set them for all the grains\n", + "\n", + "for gs in grainsinos:\n", + " gs.update_recon_parameters(pad=pad, shift=shift, y0=y0, niter=niter, mask=whole_sample_mask)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# reconstruct all grains\n", + "\n", + "for gs in tqdm(grainsinos):\n", + " gs.recon(method=\"astra\", astra_method=\"EM_CUDA\")\n", + "\n", + " if is_half_scan:\n", + " gs.mask_central_zingers(\"astra\", radius=halfmask_radius)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "fig, a = plt.subplots(1,2,figsize=(10,5))\n", + "rec = a[0].imshow(grainsinos[0].recons[\"astra\"], vmin=0, origin=\"lower\")\n", + "sin = a[1].imshow(grainsinos[0].ssino, aspect='auto')\n", + "\n", + "# Function to update the displayed image based on the selected frame\n", + "def update_frame(i):\n", + " rec.set_array(grainsinos[i].recons[\"astra\"])\n", + " sin.set_array(grainsinos[i].ssino)\n", + " a[0].set(title=str(i))\n", + " fig.canvas.draw()\n", + "\n", + "# Create a slider widget to select the frame number\n", + "frame_slider = widgets.IntSlider(\n", + " value=0,\n", + " min=0,\n", + " max=len(grains) - 1,\n", + " step=1,\n", + " description='Grain:'\n", + ")\n", + "\n", + "interact(update_frame, i=frame_slider)\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Let's assemble all the recons into one map\n", + "\n", + "rgb_x_array, rgb_y_array, rgb_z_array, grain_labels_array, raw_intensity_array = build_slice_arrays(grainsinos, cutoff_level=0.3, method=\"astra\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + 
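The ring-current correction applied above divides out intensity variations caused by the storage-ring current decaying between line scans. A minimal sketch of that kind of per-row rescaling follows; the exact normalisation used by GrainSinogram.correct_ring_current is an assumption here:

import numpy as np

def scale_sino_rows_by_ring_current(sino, ring_current):
    # sino rows are dty positions, columns are omega steps;
    # rows measured at low ring current are boosted back towards the mean
    ring_current = np.asarray(ring_current, dtype=float)
    scale = ring_current.mean() / ring_current
    return sino * scale[:, np.newaxis]

# toy usage: 5 dty rows, 8 omega steps, current sagging over the scan
sino = np.ones((5, 8))
current = np.array([200.0, 199.0, 197.0, 190.0, 185.0])
print(scale_sino_rows_by_ring_current(sino, current)[:, 0])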
"outputs": [], + "source": [ + "# plot initial output\n", + "\n", + "fig, ax = plt.subplots(constrained_layout=True)\n", + "ax.imshow(rgb_z_array, origin=\"lower\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig, ax = plt.subplots(constrained_layout=True)\n", + "ax.imshow(grain_labels_array, origin=\"lower\") # originally 1,2,0\n", + "ax.set_title(\"Grain label map\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "fig, ax = plt.subplots(constrained_layout=True)\n", + "ax.imshow(raw_intensity_array, origin=\"lower\")\n", + "ax.set_title(\"Raw intensity array\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# look at all our recons in a grid\n", + "\n", + "n_grains_to_plot = 25\n", + "\n", + "grains_step = len(grainsinos)//n_grains_to_plot\n", + "\n", + "grid_size = np.ceil(np.sqrt(len(grainsinos[::grains_step]))).astype(int)\n", + "nrows = (len(grainsinos[::grains_step])+grid_size-1)//grid_size\n", + "\n", + "fig, axs = plt.subplots(grid_size, nrows, figsize=(10,10), layout=\"constrained\", sharex=True, sharey=True)\n", + "for i, ax in enumerate(axs.ravel()):\n", + " if i < len(grainsinos[::grains_step]):\n", + " # get corresponding grain for this axis\n", + " gs = grainsinos[::grains_step][i]\n", + " ax.imshow(gs.recons[\"astra\"], vmin=0, origin=\"lower\")\n", + " # ax.invert_yaxis()\n", + " ax.set_title(i)\n", + " \n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# write our results to disk\n", + "\n", + "write_h5(ds.grainsfile, grainsinos, write_grains_too=True)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# write the slice maps to disk too\n", + "\n", + "write_slice_recon(ds.grainsfile, slice_arrays)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "ds.save()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if 1:\n", + " raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Now that we're happy with our indexing parameters, we can run the below cell to do this in bulk for many samples/datasets\n", + "# by default this will do all samples in sample_list, all datasets with a prefix of dset_prefix\n", + "# you can add samples and datasets to skip in skips_dict\n", + "\n", + "skips_dict = {\n", + " \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + "}\n", + "\n", + "dset_prefix = \"top\"\n", + "\n", + "sample_list = [\"FeAu_0p5_tR_nscope\"]\n", + " \n", + "samples_dict = utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list)\n", + " \n", + "# manual override:\n", + "# samples_dict = {\"FeAu_0p5_tR_nscope\": [\"top_100um\", \"top_200um\"]}\n", + " \n", + "# now we have our samples_dict, we can process our data:\n", + "\n", + "for sample, datasets in samples_dict.items():\n", + " for dataset in datasets:\n", + " print(f\"Processing dataset {dataset} in sample {sample}\")\n", + " 
dset_path = os.path.join(processed_data_root_dir, sample, f\"{sample}_{dataset}\", f\"{sample}_{dataset}_dataset.h5\")\n", + " if not os.path.exists(dset_path):\n", + " print(f\"Missing DataSet file for {dataset} in sample {sample}, skipping\")\n", + " continue\n", + " \n", + " print(\"Importing DataSet object\")\n", + " \n", + " ds = ImageD11.sinograms.dataset.load(dset_path)\n", + " print(f\"I have a DataSet {ds.dset} in sample {ds.sample}\")\n", + " \n", + " if not os.path.exists(ds.grainsfile):\n", + " print(f\"Missing grains file for {dataset} in sample {sample}, skipping\")\n", + " continue\n", + " \n", + " # check grains file for existance of slice_recon, skip if it's there\n", + " with h5py.File(ds.grainsfile, \"r\") as hin:\n", + " if \"slice_recon\" in hin.keys():\n", + " print(f\"Already reconstructed {dataset} in {sample}, skipping\")\n", + " continue\n", + " \n", + " if is_half_scan:\n", + " ds.correct_bins_for_half_scan()\n", + " \n", + " print(\"Peaks\")\n", + " cf_4d = ds.get_cf_4d_from_disk()\n", + " cf_4d.parameters.loadparameters(ds.parfile)\n", + " cf_4d.updateGeometry()\n", + " \n", + " cf_strong = select_ring_peaks_by_intensity(cf_4d, frac=cf_strong_frac, dstol=cf_strong_dstol, dsmax=cf_4d.ds.max())\n", + " \n", + " print(\"Grains\")\n", + " grains = ds.get_grains_from_disk()\n", + " utils.assign_peaks_to_grains(grains, cf_strong, peak_assign_tol)\n", + " \n", + " grainsinos = [GrainSinogram(g, ds) for g in grains]\n", + " \n", + " print(\"Fitting grain positions\")\n", + " for grain_label, gs in enumerate(grainsinos):\n", + " gs.update_lab_position_from_peaks(cf_strong, grain_label)\n", + " \n", + " sample_y0s = [gs.recon_y0 for gs in grainsinos]\n", + " y0 = np.median(sample_y0s)\n", + " shift = -y0/ds.ystep\n", + " \n", + " cf_pars = cf_strong.parameters.get_parameters()\n", + " cf_pars[\"cell_lattice_[P,A,B,C,I,F,R]\"] = spacegroup\n", + " ref_ucell = ImageD11.unitcell.unitcell_from_parameters(cf_pars)\n", + "\n", + " for g in grains:\n", + " g.ref_unitcell = ref_ucell\n", + " \n", + " print(\"Determining RGB colours\")\n", + " utils.get_rgbs_for_grains(grains)\n", + " \n", + " print(\"Whole sample mask recon\")\n", + " whole_sample_sino, xedges, yedges = np.histogram2d(cf_4d.dty, cf_4d.omega, bins=[ds.ybinedges, ds.obinedges])\n", + " whole_sample_recon = run_iradon(whole_sample_sino, ds.obincens, pad, shift, workers=nthreads, apply_halfmask=is_half_scan, mask_central_zingers=is_half_scan)\n", + " \n", + " recon_man_mask = apply_manual_mask(whole_sample_recon)\n", + " \n", + " if manual_threshold is None:\n", + " thresh = threshold_otsu(recon_man_mask)\n", + " else:\n", + " thresh = manual_threshold\n", + " \n", + " binary = recon_man_mask > thresh\n", + " whole_sample_mask = convex_hull_image(binary)\n", + " \n", + " gord, inds = get_2d_peaks_from_4d_peaks(ds.pk2d, cf_strong)\n", + " \n", + " print(\"Building sinograms\")\n", + " for grain_label, gs in enumerate(tqdm(grainsinos)):\n", + " gs.prepare_peaks_from_4d(cf_strong, gord, inds, grain_label, hkltol)\n", + " gs.build_sinogram()\n", + " \n", + " if is_half_scan:\n", + " gs.correct_halfmask()\n", + " \n", + " if correct_sinos_with_ring_current:\n", + " print(\"Correcting for ring current\")\n", + " ds.get_ring_current_per_scan()\n", + " for gs in grainsinos:\n", + " gs.correct_ring_current(is_half_scan=is_half_scan)\n", + " \n", + " for gs in grainsinos:\n", + " gs.update_recon_parameters(pad=pad, shift=shift, mask=whole_sample_mask, niter=niter, y0=y0)\n", + " \n", + " for gs in tqdm(grainsinos):\n", + " 
gs.recon(method=\"astra\")\n", + "\n", + " if is_half_scan:\n", + " halfmask_radius = 25\n", + " gs.mask_central_zingers(\"astra\", radius=halfmask_radius)\n", + " \n", + " print(\"Final save\")\n", + " slice_arrays = build_slice_arrays(grainsinos, cutoff_level=cutoff_level, method=\"mlem\")\n", + " write_h5(ds.grainsfile, grainsinos, write_grains_too=True)\n", + " write_slice_recon(ds.grainsfile, slice_arrays)\n", + " \n", + " ds.save()\n", + "\n", + "print(\"Done!\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (main)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.11.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map_minor_phase_astra.ipynb b/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map_minor_phase_astra.ipynb new file mode 100644 index 00000000..0fdae469 --- /dev/null +++ b/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map_minor_phase_astra.ipynb @@ -0,0 +1,997 @@ +{ + "cells": [ + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "# Jupyter notebook based on ImageD11 to process scanning 3DXRD data\n", + "# Written by Haixing Fang, Jon Wright and James Ball\n", + "## Date: 28/03/2024" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# USER: Change the path below to point to your local copy of ImageD11:\n", + "\n", + "import os\n", + "\n", + "home_dir = !echo $HOME\n", + "home_dir = str(home_dir[0])\n", + "\n", + "# USER: You can change this location if you want\n", + "\n", + "id11_code_path = os.path.join(home_dir, \"Code/ImageD11\")\n", + "\n", + "import sys\n", + "\n", + "sys.path.insert(0, id11_code_path)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# import functions we need\n", + "\n", + "import concurrent.futures\n", + "\n", + "%matplotlib ipympl\n", + "\n", + "import h5py\n", + "from tqdm.notebook import tqdm\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt\n", + "\n", + "import ImageD11.columnfile\n", + "import ImageD11.sinograms.dataset\n", + "from ImageD11.grain import grain\n", + "from ImageD11.peakselect import select_ring_peaks_by_intensity, rings_mask\n", + "from ImageD11.sinograms.sinogram import GrainSinogram, build_slice_arrays, write_slice_recon, write_h5, read_h5, get_2d_peaks_from_4d_peaks\n", + "from ImageD11.sinograms.roi_iradon import run_iradon\n", + "\n", + "from skimage.filters import threshold_otsu\n", + "from skimage.morphology import convex_hull_image\n", + "\n", + "import ImageD11.nbGui.nb_utils as utils\n", + "\n", + "import ipywidgets as widgets\n", + "from ipywidgets import interact" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# NOTE: For old datasets before the new directory layout structure, we don't distinguish between RAW_DATA and PROCESSED_DATA\n", + "\n", + "### USER: specify your experimental directory\n", + "\n", + "rawdata_path = \"/data/visitor/ihma439/id11/20231211/RAW_DATA\"\n", + "\n", + "!ls -lrt {rawdata_path}\n", + "\n", + 
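Both notebooks drive their bulk-processing cells with utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list). The real helper may behave differently, but a rough sketch of that kind of dataset discovery, assuming the RAW_DATA/<sample>/<sample>_<dataset> folder layout used above, looks like:

import os

def find_datasets(rawdata_path, skips_dict, dset_prefix, sample_list):
    # guess at the discovery logic: keep <sample>_<dataset> folders whose dataset
    # name starts with dset_prefix and is not listed in skips_dict[sample]
    samples_dict = {}
    for sample in sample_list:
        sample_dir = os.path.join(rawdata_path, sample)
        if not os.path.isdir(sample_dir):
            continue
        dsets = []
        for name in sorted(os.listdir(sample_dir)):
            if not name.startswith(sample + "_"):
                continue
            dset = name[len(sample) + 1:]
            if dset.startswith(dset_prefix) and dset not in skips_dict.get(sample, []):
                dsets.append(dset)
        if dsets:
            samples_dict[sample] = dsets
    return samples_dict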
"### USER: specify where you want your processed data to go\n", + "\n", + "processed_data_root_dir = \"/data/visitor/ihma439/id11/20231211/PROCESSED_DATA/James/nb_testing\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# USER: pick a sample and a dataset you want to segment\n", + "\n", + "sample = \"FeAu_0p5_tR_nscope\"\n", + "dataset = \"top_100um\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# desination of H5 files\n", + "\n", + "dset_path = os.path.join(processed_data_root_dir, sample, f\"{sample}_{dataset}\", f\"{sample}_{dataset}_dataset.h5\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Load the dataset (for motor positions, not sure why these are not in peaks)\n", + "ds = ImageD11.sinograms.dataset.load(dset_path)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# If the sinograms are only half-sinograms (we scanned dty across half the sample rather than the full sample), set the below to true:\n", + "is_half_scan = False" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "if is_half_scan:\n", + " ds.correct_bins_for_half_scan()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Import 4D peaks\n", + "\n", + "cf_4d = ds.get_cf_4d_from_disk()\n", + "\n", + "cf_4d.parameters.loadparameters(ds.parfile)\n", + "cf_4d.updateGeometry()\n", + "\n", + "print(f\"Read {cf_4d.nrows} 4D peaks\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# isolate main phase peaks, and remove them from the dataset\n", + "\n", + "major_phase_cf_dstol = 0.0075\n", + "major_phase_peaks_mask = rings_mask(cf_4d, dstol=major_phase_cf_dstol, dsmax=cf_4d.ds.max())\n", + "\n", + "minor_phase_peaks = cf_4d.copy()\n", + "minor_phase_peaks.filter(~major_phase_peaks_mask)\n", + "\n", + "# Update geometry for minor phase peaks\n", + "\n", + "minor_phase_par_file = os.path.join(processed_data_root_dir, '../../../SCRIPTS/James/S3DXRD/Au.par')\n", + "major_phase_par_file = ds.parfile\n", + "\n", + "ds.parfile = minor_phase_par_file\n", + "\n", + "minor_phase_peaks.parameters.loadparameters(ds.parfile)\n", + "minor_phase_peaks.updateGeometry()\n", + "\n", + "cf_strong_frac = 0.95\n", + "cf_strong_dstol = 0.005\n", + "\n", + "cf_strong = select_ring_peaks_by_intensity(minor_phase_peaks, dstol=cf_strong_dstol, dsmax=minor_phase_peaks.ds.max(), frac=cf_strong_frac, doplot=0.01)\n", + "print(cf_strong.nrows)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "phase_name = \"Au\"\n", + "\n", + "minor_phase_grains_path = os.path.splitext(ds.grainsfile)[0] + f'_{phase_name}.h5'\n", + "\n", + "grains = ImageD11.grain.read_grain_file_h5(minor_phase_grains_path)\n", + "print(f\"{len(grains)} grains imported\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# load major phase grain reconstruction\n", + "# for pad and recon shifts\n", + "\n", + "major_phase_grainsinos = read_h5(ds.grainsfile, 
ds)\n", + "whole_sample_mask = major_phase_grainsinos[0].recon_mask\n", + "shift = major_phase_grainsinos[0].recon_shift\n", + "pad = major_phase_grainsinos[0].recon_pad\n", + "y0 = major_phase_grainsinos[0].recon_y0\n", + "\n", + "print(shift, y0, pad)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# assign peaks to the grains\n", + "\n", + "peak_assign_tol = 0.25\n", + "utils.assign_peaks_to_grains(grains, cf_strong, peak_assign_tol)\n", + "\n", + "for grain_label, g in enumerate(grains):\n", + " g.npks_4d = np.sum(cf_strong.grain_id == grain_label)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# let's make a GrainSinogram object for each grain\n", + "\n", + "grainsinos = [GrainSinogram(g, ds) for g in grains]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Now let's determine the positions of each grain from the 4D peaks\n", + "\n", + "for grain_label, gs in enumerate(grainsinos):\n", + " gs.update_lab_position_from_peaks(cf_strong, grain_label)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# We can also determine the RGB IPF colours of the grains which will be useful for plotting\n", + "# To do this, we first need to set a reference unitcell for each grain\n", + "# This will be used to determine the Orix Phase and therefore Orix Orientation\n", + "\n", + "cf_pars = cf_strong.parameters.get_parameters()\n", + "spacegroup = 225 # spacegroup for FCC Au\n", + "cf_pars[\"cell_lattice_[P,A,B,C,I,F,R]\"] = spacegroup\n", + "\n", + "ref_ucell = ImageD11.unitcell.unitcell_from_parameters(cf_pars)\n", + "\n", + "for g in grains:\n", + " g.ref_unitcell = ref_ucell\n", + "\n", + "# Now colours should work\n", + "\n", + "utils.get_rgbs_for_grains(grains)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "utils.plot_all_ipfs(grains)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Now we can plot our grain positions and RGB colours:\n", + "\n", + "# plt.style.use('dark_background')\n", + "fig, ax = plt.subplots(2,2, figsize=(12,12))\n", + "a = ax.ravel()\n", + "x = [g.translation[1] for g in grains]\n", + "y = [g.translation[0] for g in grains]\n", + "s = [g.npks_4d/10 for g in grains]\n", + "a[0].scatter(x, y, c=[g.rgb_z for g in grains], s=s)\n", + "a[0].set(title='IPF color Z', aspect='equal')\n", + "a[1].scatter(x, y, c=[g.rgb_y for g in grains], s=s)\n", + "a[1].set(title='IPF color Y', aspect='equal')\n", + "a[2].scatter(x, y, c=[g.rgb_x for g in grains], s=s)\n", + "a[2].set(title='IPF color X', aspect='equal')\n", + "a[3].scatter(x, y, c=s)\n", + "a[3].set(title='Number of 4D peaks', aspect='equal')\n", + "\n", + "fig.supxlabel(\"Lab y (transverse)\")\n", + "fig.supylabel(\"Lab x (beam)\")\n", + "\n", + "for a in ax.ravel():\n", + " a.invert_xaxis()\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# now we have a whole-sample reconstruction we can use as a sample mask\n", + "# let's build the sinograms for our grains\n", + "# before we do this, we need to determine our 2D peaks that 
will be used for the sinogram\n", + "# here we can get them from the 4D peaks:\n", + "\n", + "hkltol = 0.25\n", + "\n", + "gord, inds = get_2d_peaks_from_4d_peaks(ds.pk2d, cf_strong)\n", + "\n", + "for grain_label, gs in enumerate(tqdm(grainsinos)):\n", + " gs.prepare_peaks_from_4d(cf_strong, gord, inds, grain_label, hkltol)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# now we can actually generate the sinograms\n", + "\n", + "for gs in tqdm(grainsinos):\n", + " gs.build_sinogram()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# optionally correct the halfmask:\n", + "\n", + "if is_half_scan:\n", + " for gs in grainsinos:\n", + " gs.correct_halfmask()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Show sinogram of single grain\n", + "\n", + "gs = grainsinos[0]\n", + "\n", + "fig, ax = plt.subplots()\n", + "\n", + "ax.imshow(gs.ssino, aspect='auto')\n", + "ax.set_title(\"ssino\")\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# We can optionally correct each row of the sinogram by the ring current of that rotation\n", + "# This helps remove artifacts in the reconstruction\n", + "\n", + "correct_sinos_with_ring_current = True\n", + "if correct_sinos_with_ring_current:\n", + " \n", + " ds.get_ring_current_per_scan()\n", + " \n", + " for gs in grainsinos:\n", + " gs.correct_ring_current(is_half_scan=is_half_scan)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Show sinogram of single grain\n", + "\n", + "gs = grainsinos[0]\n", + "\n", + "fig, ax = plt.subplots()\n", + "\n", + "ax.imshow(gs.ssino, aspect='auto')\n", + "ax.set_title(\"ssino\")\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# go straight to ASTRA\n", + "\n", + "gs = grainsinos[0]\n", + "\n", + "# update the parameters used for the iradon reconstruction\n", + "\n", + "niter = 500\n", + "\n", + "gs.update_recon_parameters(pad=pad, shift=shift, y0=y0, niter=niter, mask=whole_sample_mask)\n", + "\n", + "gs.recon(method=\"astra\", astra_method=\"FBP_CUDA\")\n", + "\n", + "if is_half_scan:\n", + " halfmask_radius = 25\n", + " gs.mask_central_zingers(\"iradon\", radius=halfmask_radius)\n", + "\n", + "# view the result\n", + "\n", + "fig, axs = plt.subplots(1,2, figsize=(10,5))\n", + "axs[0].imshow(gs.ssino, aspect='auto')\n", + "axs[0].set_title(\"ssino\")\n", + "axs[1].imshow(gs.recons[\"astra\"], vmin=0, origin=\"lower\")\n", + "axs[1].set_title(\"Astra\")\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# once you're happy with the reconstruction parameters used, set them for all the grains\n", + "\n", + "for gs in grainsinos:\n", + " gs.update_recon_parameters(pad=pad, shift=shift, mask=whole_sample_mask, niter=niter, y0=y0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# reconstruct all grains\n", + "\n", + "for gs in tqdm(grainsinos):\n", + " gs.recon(method=\"astra\", astra_method=\"SART_CUDA\")\n", + "\n", + " 
if is_half_scan:\n", + " gs.mask_central_zingers(\"astra\", radius=halfmask_radius)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "fig, a = plt.subplots(1,2,figsize=(10,5))\n", + "rec = a[0].imshow(grainsinos[0].recons[\"astra\"], vmin=0, origin=\"lower\")\n", + "sin = a[1].imshow(grainsinos[0].ssino, aspect='auto')\n", + "\n", + "# Function to update the displayed image based on the selected frame\n", + "def update_frame(i):\n", + " rec.set_array(grainsinos[i].recons[\"astra\"])\n", + " sin.set_array(grainsinos[i].ssino)\n", + " a[0].set(title=str(i))\n", + " fig.canvas.draw()\n", + "\n", + "# Create a slider widget to select the frame number\n", + "frame_slider = widgets.IntSlider(\n", + " value=0,\n", + " min=0,\n", + " max=len(grains) - 1,\n", + " step=1,\n", + " description='Grain:'\n", + ")\n", + "\n", + "interact(update_frame, i=frame_slider)\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Let's assemble all the recons into one map\n", + "\n", + "rgb_x_array, rgb_y_array, rgb_z_array, grain_labels_array, raw_intensity_array = build_slice_arrays(grainsinos, cutoff_level=0.9, method=\"astra\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# plot initial output\n", + "\n", + "fig, ax = plt.subplots(constrained_layout=True)\n", + "ax.imshow(rgb_z_array, origin=\"lower\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "fig, ax = plt.subplots(constrained_layout=True)\n", + "ax.imshow(grain_labels_array, origin=\"lower\") # originally 1,2,0\n", + "ax.set_title(\"Grain label map\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "fig, ax = plt.subplots(constrained_layout=True)\n", + "ax.imshow(raw_intensity_array, origin=\"lower\")\n", + "ax.set_title(\"Raw intensity array\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# There will likely be many streaks, indicating a few grains have dodgy reconstructions and are probably not to be trusted\n", + "# To fix this, we can count how many pixels in the grain labels array each grain has\n", + "# It can be helpful to run this filtration more than once\n", + "\n", + "labels, counts = np.unique(grain_labels_array, return_counts=True)\n", + "\n", + "fig, ax = plt.subplots()\n", + "ax.plot(labels[labels > 0], counts[labels > 0])\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# filter out grains with more than 10 pixels in the label map\n", + "# this normally indicates a dodgy reconstruction for this grain\n", + "# only really applies if the grains are very small!\n", + "\n", + "grain_too_many_px = 10\n", + "\n", + "bad_gids = [int(label) for (label, count) in zip(labels, counts) if count > grain_too_many_px and label > 0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# before we filter, determine our grain labels\n", + "# this is so we know which labels to give our grains in the 
filtered grain map\n", + "# such that it still agrees with the grain order\n", + "\n", + "print(f\"{len(grainsinos)} grains before filtration\")\n", + "grainsinos_clean = [gs for (inc, gs) in enumerate(grainsinos) if inc not in bad_gids]\n", + "grain_labels_clean = [inc for (inc, gs) in enumerate(grainsinos) if inc not in bad_gids]\n", + "print(f\"{len(grainsinos_clean)} grains after filtration\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Let's assemble all the recons into one map\n", + "\n", + "cutoff_level = 0.7\n", + "\n", + "slice_arrays = build_slice_arrays(grainsinos_clean, cutoff_level=cutoff_level, grain_labels=grain_labels_clean, method=\"astra\")\n", + "rgb_x_array, rgb_y_array, rgb_z_array, grain_labels_array, raw_intensity_array = slice_arrays" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# plot initial output\n", + "\n", + "fig, ax = plt.subplots(constrained_layout=True)\n", + "ax.imshow(rgb_z_array, origin=\"lower\") # originally 1,2,0\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig, ax = plt.subplots(constrained_layout=True)\n", + "ax.imshow(grain_labels_array, origin=\"lower\") # originally 1,2,0\n", + "ax.set_title(\"Grain label map\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fig, ax = plt.subplots(constrained_layout=True)\n", + "ax.imshow(raw_intensity_array, origin=\"lower\") # originally 1,2,0\n", + "ax.set_title(\"Raw intensity array\")\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# we can determine improved positions of our grains from the positions of their reconstructions\n", + "\n", + "for gs in tqdm(grainsinos):\n", + " gs.update_lab_position_from_recon(method=\"astra\")\n", + " \n", + "# change the y0 back to what we imported at the beginning:\n", + "\n", + "for gs in grainsinos:\n", + " gs.update_recon_parameters(y0=y0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# Now we can plot our grain positions and RGB colours:\n", + "\n", + "# plt.style.use('dark_background')\n", + "fig, ax = plt.subplots(2,2, figsize=(12,12))\n", + "a = ax.ravel()\n", + "x = [g.translation[1] for g in grains]\n", + "y = [g.translation[0] for g in grains]\n", + "s = [g.npks_4d/10 for g in grains]\n", + "a[0].scatter(x, y, c=[g.rgb_z for g in grains], s=s)\n", + "a[0].set(title='IPF color Z', aspect='equal')\n", + "a[1].scatter(x, y, c=[g.rgb_y for g in grains], s=s)\n", + "a[1].set(title='IPF color Y', aspect='equal')\n", + "a[2].scatter(x, y, c=[g.rgb_x for g in grains], s=s)\n", + "a[2].set(title='IPF color X', aspect='equal')\n", + "a[3].scatter(x, y, c=s)\n", + "a[3].set(title='Number of 4D peaks', aspect='equal')\n", + "\n", + "fig.supxlabel(\"Lab y (transverse)\")\n", + "fig.supylabel(\"Lab x (beam)\")\n", + "\n", + "for a in ax.ravel():\n", + " a.invert_xaxis()\n", + "\n", + "plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# write our results to disk\n", + "\n", + "write_h5(minor_phase_grains_path, grainsinos, write_grains_too=True)" + ] + }, + { + "cell_type": "code", + 
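For intuition about update_lab_position_from_recon used above: a grain's position can be re-estimated from the intensity-weighted centre of mass of its reconstruction. A numpy-only sketch follows; the pixel-to-micron conversion and origin convention here are assumptions, the real conversion lives in the GrainSinogram method:

import numpy as np

def recon_centre_of_mass(recon, ystep, y0=0.0):
    # intensity-weighted centre of mass in pixels, then offsets from the image
    # centre scaled by the dty step to get back to microns (convention assumed)
    recon = np.clip(recon, 0, None)
    total = recon.sum()
    ii, jj = np.indices(recon.shape)
    ci = (ii * recon).sum() / total
    cj = (jj * recon).sum() / total
    centre = (np.array(recon.shape) - 1) / 2.0
    return (ci - centre[0]) * ystep + y0, (cj - centre[1]) * ystep

# toy usage: a single bright pixel away from the centre of an 11x11 image
img = np.zeros((11, 11))
img[7, 3] = 1.0
print(recon_centre_of_mass(img, ystep=2.0))   # -> (4.0, -4.0)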
"execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "# write the slice maps to disk too\n", + "\n", + "write_slice_recon(minor_phase_grains_path, slice_arrays)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "tags": [] + }, + "outputs": [], + "source": [ + "ds.parfile = major_phase_par_file\n", + "\n", + "ds.save()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "if 1:\n", + " raise ValueError(\"Change the 1 above to 0 to allow 'Run all cells' in the notebook\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# Now that we're happy with our indexing parameters, we can run the below cell to do this in bulk for many samples/datasets\n", + "# by default this will do all samples in sample_list, all datasets with a prefix of dset_prefix\n", + "# you can add samples and datasets to skip in skips_dict\n", + "\n", + "skips_dict = {\n", + " \"FeAu_0p5_tR_nscope\": [\"top_-50um\", \"top_-100um\"]\n", + "}\n", + "\n", + "dset_prefix = \"top\"\n", + "\n", + "sample_list = [\"FeAu_0p5_tR_nscope\"]\n", + " \n", + "samples_dict = utils.find_datasets_to_process(rawdata_path, skips_dict, dset_prefix, sample_list)\n", + " \n", + "# manual override:\n", + "# samples_dict = {\"FeAu_0p5_tR_nscope\": [\"top_100um\", \"top_200um\"]}\n", + " \n", + "# now we have our samples_dict, we can process our data:\n", + "\n", + "\n", + "for sample, datasets in samples_dict.items():\n", + " for dataset in datasets:\n", + " print(f\"Processing dataset {dataset} in sample {sample}\")\n", + " dset_path = os.path.join(processed_data_root_dir, sample, f\"{sample}_{dataset}\", f\"{sample}_{dataset}_dataset.h5\")\n", + " if not os.path.exists(dset_path):\n", + " print(f\"Missing DataSet file for {dataset} in sample {sample}, skipping\")\n", + " continue\n", + " \n", + " print(\"Importing DataSet object\")\n", + " \n", + " ds = ImageD11.sinograms.dataset.load(dset_path)\n", + " print(f\"I have a DataSet {ds.dset} in sample {ds.sample}\")\n", + " \n", + " minor_phase_grains_path = os.path.splitext(ds.grainsfile)[0] + f'_{phase_name}.h5'\n", + " \n", + " if not os.path.exists(minor_phase_grains_path):\n", + " print(f\"Missing grains file for {dataset} in sample {sample}, skipping\")\n", + " continue\n", + " \n", + " # check grains file for existance of slice_recon, skip if it's there\n", + " with h5py.File(minor_phase_grains_path, \"r\") as hin:\n", + " if \"slice_recon\" in hin.keys():\n", + " print(f\"Already reconstructed {dataset} in {sample}, skipping\")\n", + " continue\n", + " \n", + " if is_half_scan:\n", + " ds.correct_bins_for_half_scan()\n", + " \n", + " print(\"Importing peaks\")\n", + " cf_4d = ds.get_cf_4d_from_disk()\n", + " cf_4d.parameters.loadparameters(ds.parfile)\n", + " cf_4d.updateGeometry()\n", + " \n", + " print(\"Filtering peaks\")\n", + " major_phase_peaks_mask = rings_mask(cf_4d, dstol=major_phase_cf_dstol, dsmax=cf_4d.ds.max())\n", + " minor_phase_peaks = cf_4d.copy()\n", + " minor_phase_peaks.filter(~major_phase_peaks_mask)\n", + " major_phase_par_file = ds.parfile\n", + " ds.parfile = minor_phase_par_file\n", + " minor_phase_peaks.parameters.loadparameters(ds.parfile)\n", + " minor_phase_peaks.updateGeometry()\n", + " cf_strong = select_ring_peaks_by_intensity(minor_phase_peaks, dstol=cf_strong_dstol, dsmax=minor_phase_peaks.ds.max(), frac=cf_strong_frac)\n", + " \n", + " print(\"Importing 
grains\")\n", + " minor_phase_grains_path = os.path.splitext(ds.grainsfile)[0] + f'_{phase_name}.h5'\n", + " grains = ImageD11.grain.read_grain_file_h5(minor_phase_grains_path)\n", + "\n", + " major_phase_grainsinos = read_h5(ds.grainsfile, ds)\n", + " whole_sample_mask = major_phase_grainsinos[0].recon_mask\n", + " shift = major_phase_grainsinos[0].recon_shift\n", + " pad = major_phase_grainsinos[0].recon_pad\n", + " \n", + " utils.assign_peaks_to_grains(grains, cf_strong, tol=peak_assign_tol)\n", + " for grain_label, g in enumerate(grains):\n", + " g.npks_4d = np.sum(cf_strong.grain_id == grain_label)\n", + " \n", + " grainsinos = [GrainSinogram(g, ds) for g in grains]\n", + " \n", + " for grain_label, gs in enumerate(grainsinos):\n", + " gs.update_lab_position_from_peaks(cf_strong, grain_label)\n", + " \n", + " cf_pars = cf_strong.parameters.get_parameters()\n", + " cf_pars[\"cell_lattice_[P,A,B,C,I,F,R]\"] = spacegroup\n", + " ref_ucell = ImageD11.unitcell.unitcell_from_parameters(cf_pars)\n", + " for g in grains:\n", + " g.ref_unitcell = ref_ucell\n", + "\n", + " utils.get_rgbs_for_grains(grains)\n", + " \n", + " print(\"Building sinograms\")\n", + " for grain_label, gs in enumerate(tqdm(grainsinos)):\n", + " gs.prepare_peaks_from_4d(cf_strong, grain_label, hkltol)\n", + " gs.build_sinogram()\n", + " \n", + " if is_half_scan:\n", + " gs.correct_halfmask()\n", + " \n", + " if correct_sinos_with_ring_current:\n", + " ds.get_ring_current_per_scan()\n", + " for gs in grainsinos:\n", + " gs.correct_ring_current(is_half_scan=is_half_scan)\n", + " \n", + " for gs in grainsinos:\n", + " gs.update_recon_parameters(pad=pad, shift=shift, mask=whole_sample_mask)\n", + " \n", + " with concurrent.futures.ThreadPoolExecutor(max_workers= max(1,nthreads-1)) as pool:\n", + " for i in tqdm(pool.map(GrainSinogram.recon, grainsinos), total=len(grainsinos)):\n", + " pass\n", + " \n", + " if is_half_scan:\n", + " for gs in grainsinos:\n", + " gs.mask_central_zingers(\"iradon\", radius=halfmask_radius)\n", + "\n", + " rgb_x_array, rgb_y_array, rgb_z_array, grain_labels_array, raw_intensity_array = build_slice_arrays(grainsinos, cutoff_level)\n", + " labels, counts = np.unique(grain_labels_array, return_counts=True)\n", + " bad_gids = [int(label) for (label, count) in zip(labels, counts) if count > grain_too_many_px and label > 0]\n", + " \n", + " grainsinos_clean = [gs for (inc, gs) in enumerate(grainsinos) if inc not in bad_gids]\n", + " grain_labels_clean = [inc for (inc, gs) in enumerate(grainsinos) if inc not in bad_gids]\n", + " \n", + " slice_arrays = build_slice_arrays(grainsinos_clean, cutoff_level=cutoff_level, grain_labels=grain_labels_clean)\n", + " \n", + " for gs in tqdm(grainsinos):\n", + " gs.update_lab_position_from_recon()\n", + " \n", + " for gs in grainsinos:\n", + " gs.update_recon_parameters(y0=y0)\n", + " \n", + " write_h5(minor_phase_grains_path, grainsinos, write_grains_too=True)\n", + " write_slice_recon(minor_phase_grains_path, slice_arrays)\n", + "\n", + " ds.parfile = major_phase_par_file\n", + " ds.save()\n", + "\n", + "print(\"Done!\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3 (main)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + 
"pygments_lexer": "ipython3", + "version": "3.11.6" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/ImageD11/sinograms/sinogram.py b/ImageD11/sinograms/sinogram.py index 4341da38..900d786b 100644 --- a/ImageD11/sinograms/sinogram.py +++ b/ImageD11/sinograms/sinogram.py @@ -3,7 +3,6 @@ import h5py import numba import numpy as np -from skimage.filters import threshold_otsu import ImageD11.grain import ImageD11.cImageD11 @@ -207,9 +206,9 @@ def update_recon_parameters(self, pad=None, shift=None, mask=None, niter=None, y if y0 is not None: self.recon_y0 = y0 - def recon(self, method="iradon", workers=1): + def recon(self, method="iradon", workers=1, **extra_args): """Performs reconstruction given reconstruction method""" - if method not in ["iradon", "mlem"]: + if method not in ["iradon", "mlem", "astra"]: raise ValueError("Unsupported method!") if self.ssino is None or self.sinoangles is None: raise ValueError("Sorted sino or sinoangles are missing/have not been computed, unable to reconstruct.") @@ -230,13 +229,16 @@ def recon(self, method="iradon", workers=1): recon_function = partial(ImageD11.sinograms.roi_iradon.run_mlem, niter=self.recon_niter) else: recon_function = ImageD11.sinograms.roi_iradon.run_mlem + elif method == "astra": + recon_function = run_astra recon = recon_function(sino=sino, angles=angles, pad=self.recon_pad, shift=self.recon_shift, workers=workers, - mask=self.recon_mask) + mask=self.recon_mask, + **extra_args) self.recons[method] = recon @@ -328,6 +330,58 @@ def from_h5py_group(cls, group, ds, grain): return grainsino_obj +def run_astra(sino, angles, shift=0, pad=0, mask=None, niter=100, astra_method='SIRT_CUDA', workers=None): + import astra + angles = np.radians(angles) + allowed_methods = ['BP', 'SIRT', 'BP_CUDA', 'FBP_CUDA', 'SIRT_CUDA', 'SART_CUDA', 'CGLS_CUDA', 'EM_CUDA'] + if astra_method not in allowed_methods: + raise ValueError("Unsupported method!") + if astra_method == 'EM_CUDA' and mask is not None: + print("Can't use mask with EM_CUDA method!") + mask = None + + vol_geom = astra.create_vol_geom((sino.shape[0]+pad, sino.shape[0]+pad)) + proj_geom = astra.create_proj_geom('parallel', 1.0, sino.shape[0], angles) + if shift != 0: + proj_geom = astra.functions.geom_postalignment(proj_geom, shift) + proj_id = astra.create_projector('linear', proj_geom, vol_geom) + proj_data_id = astra.data2d.create('-sino', proj_geom, data=sino.T) + if astra_method == 'EM_CUDA': + # initialise with ones + rec_id = astra.data2d.create('-vol', vol_geom, 1.0) + else: + rec_id = astra.data2d.create('-vol', vol_geom, 0.0) + + cfg = astra.creators.astra_dict(astra_method) + + if 'CUDA' not in astra_method: + cfg['ProjectorId'] = proj_id + cfg['ProjectionDataId'] = proj_data_id + cfg['ReconstructionDataId'] = rec_id + cfg['option'] = {} + cfg['option']['MinConstraint'] = 0 + cfg['option']['MaxConstraint'] = 1 + + if mask is not None: + mask_id = astra.data2d.create('-vol', vol_geom, mask) + cfg['option']['ReconstructionMaskId'] = mask_id + + alg_id = astra.algorithm.create(cfg) + astra.algorithm.run(alg_id, iterations=niter) + recon = astra.data2d.get(rec_id) + + # Clean up. 
+ astra.algorithm.delete(alg_id) + astra.data2d.delete(rec_id) + astra.projector.delete(proj_id) + astra.data2d.delete(proj_data_id) + + if mask is not None: + astra.data2d.delete(mask_id) + + return recon + + def write_h5(filename, list_of_sinos, write_grains_too=True): """Write list of GrainSinogram objects to H5Py file If write_grains_too is True, will also write self.grain to the same file""" @@ -462,6 +516,121 @@ def norm(r): + + + +from skimage.filters import threshold_otsu, threshold_local, threshold_yen, threshold_li +from skimage.morphology import disk, white_tophat, remove_small_holes +from skimage.segmentation import watershed +from skimage.measure import label + +def norm(r): + m = r > r.max() * 0.2 + return (r / r[m].mean()).clip(0, 1) + +def filter_normalise_recon(recon): + """Function to filter and normalise reconstructions""" + # determine threshold of grain vs background + thresh = threshold_yen(recon) + + # determine binary mask of grain + binary = recon > thresh + + # this still leaves some hot pixels behind + footprint = disk(8) + res = white_tophat(binary, footprint) + binary_clean = binary ^ res + + # binary_clean might contain holes + binary_clean_noholes = remove_small_holes(binary_clean) + + # fig, ax = plt.subplots() + # ax.imshow(binary_clean_noholes, origin="lower") + # plt.show() + + # determine masked recon image + recon_masked = np.where(binary_clean_noholes, recon, 0) + + # to normalise, we first need to segment into contiguous blobs + # normalise each of them separately + # label image regions + label_image = label(binary_clean_noholes) + + recon_masked_normed = np.zeros_like(recon) + + + for region_id in np.unique(label_image.ravel())[1:]: + recon_this_region_only = np.where(label_image == region_id, recon_masked, 0) + recon_this_region_only_normed = norm(recon_this_region_only) + recon_masked_normed = np.where(label_image == region_id, recon_this_region_only_normed, recon_masked_normed) + + return recon_masked_normed + + + +def build_slice_arrays_clean(grainsinos, method="iradon", grain_labels=None): + """Build grain maps from individual grain reconstructions + Cleans and normalises reconstructions before building slice arrays + Saves cleaned and normalised reconstruction to gs.recons[method_norm] + Optionally provide a different list of grain labels to label the grains""" + + if grain_labels is not None: + assert len(grainsinos) == len(grain_labels) + + first_recon = grainsinos[0].recons[method] + grain_labels_array = np.zeros_like(first_recon) - 1 + + redx = np.zeros_like(first_recon) + grnx = np.zeros_like(first_recon) + blux = np.zeros_like(first_recon) + + redy = np.zeros_like(first_recon) + grny = np.zeros_like(first_recon) + bluy = np.zeros_like(first_recon) + + redz = np.zeros_like(first_recon) + grnz = np.zeros_like(first_recon) + bluz = np.zeros_like(first_recon) + + raw_intensity_array = np.zeros_like(first_recon) + + for i, gs in enumerate(grainsinos): + + if grain_labels is not None: + label = grain_labels[i] + else: + label = i + + # the grain should win where its intensity is greater than what's there before + g_raw_intensity_map = filter_normalise_recon(gs.recons[method]) + gs.recons[method + "_norm"] = g_raw_intensity_map + + g_raw_intensity_mask = g_raw_intensity_map > raw_intensity_array + + raw_intensity_array = np.where(g_raw_intensity_mask, g_raw_intensity_map, raw_intensity_array) + + redx = np.where(g_raw_intensity_mask, g_raw_intensity_map * gs.grain.rgb_x[0], redx) + redy = 
np.where(g_raw_intensity_mask, g_raw_intensity_map * gs.grain.rgb_y[0], redy) + redz = np.where(g_raw_intensity_mask, g_raw_intensity_map * gs.grain.rgb_z[0], redz) + + grnx = np.where(g_raw_intensity_mask, g_raw_intensity_map * gs.grain.rgb_x[1], grnx) + grny = np.where(g_raw_intensity_mask, g_raw_intensity_map * gs.grain.rgb_y[1], grny) + grnz = np.where(g_raw_intensity_mask, g_raw_intensity_map * gs.grain.rgb_z[1], grnz) + + blux = np.where(g_raw_intensity_mask, g_raw_intensity_map * gs.grain.rgb_x[2], blux) + bluy = np.where(g_raw_intensity_mask, g_raw_intensity_map * gs.grain.rgb_y[2], bluy) + bluz = np.where(g_raw_intensity_mask, g_raw_intensity_map * gs.grain.rgb_z[2], bluz) + + grain_labels_array[g_raw_intensity_mask] = label + + # redx has shape (i, j) + # (redx, grnx, blux) has shape (3, i, j) + # transpose changes this to (i, j, 3) needed for mpl imshow + # crucially: (i, j) unaffected + + rgb_x_array = np.transpose((redx, grnx, blux), axes=(1, 2, 0)) + rgb_y_array = np.transpose((redy, grny, bluy), axes=(1, 2, 0)) + rgb_z_array = np.transpose((redz, grnz, bluz), axes=(1, 2, 0)) + + return rgb_x_array, rgb_y_array, rgb_z_array, grain_labels_array, raw_intensity_array + # TODO: Use Silx Nexus IO instead? # This is a temporary sensible middle ground! From 622424c9f2cf395d406c7edf5ef5f33d348d1d9a Mon Sep 17 00:00:00 2001 From: James Ball Date: Tue, 21 May 2024 16:08:41 +0200 Subject: [PATCH 2/6] Some bugfixes for notebooks --- .../3DXRD/1_3DXRD_refine_parameters.ipynb | 35 +++++++++++++++---- ImageD11/nbGui/3DXRD/2_3DXRD_index.ipynb | 28 +++++++++++++-- .../nbGui/S3DXRD/2_S3DXRD_sinograms_map.ipynb | 7 ++-- .../2_S3DXRD_sinograms_map_minor_phase.ipynb | 4 +-- 4 files changed, 61 insertions(+), 13 deletions(-) diff --git a/ImageD11/nbGui/3DXRD/1_3DXRD_refine_parameters.ipynb b/ImageD11/nbGui/3DXRD/1_3DXRD_refine_parameters.ipynb index 23b8448d..392e23a1 100755 --- a/ImageD11/nbGui/3DXRD/1_3DXRD_refine_parameters.ipynb +++ b/ImageD11/nbGui/3DXRD/1_3DXRD_refine_parameters.ipynb @@ -75,7 +75,8 @@ "import ImageD11.columnfile\n", "from ImageD11.sinograms import properties, dataset\n", "\n", - "from ImageD11.blobcorrector import eiger_spatial" + "from ImageD11.blobcorrector import eiger_spatial\n", + "from ImageD11.peakselect import select_ring_peaks_by_intensity" ] }, { @@ -198,7 +199,7 @@ "# this indicates the fractional intensity cutoff we will select\n", "# if the blue line does not look elbow-shaped in the logscale plot, try changing the \"doplot\" parameter (the y scale of the logscale plot) until it does\n", "\n", - "cf_strong = utils.selectpeaks(cf_3d, frac=0.985, dsmax=1.04, doplot=0.8, dstol=0.01)\n", + "cf_strong = select_ring_peaks_by_intensity(cf_3d, frac=0.985, dsmax=1.04, doplot=0.8, dstol=0.01)\n", "print(f\"Got {cf_strong.nrows} strong peaks for indexing\")\n", "cf_strong.writefile(f'{sample}_{dataset}_3d_peaks_strong.flt')" ] @@ -214,7 +215,7 @@ "# we will also export some additional strong peaks across all rings\n", "# this will be useful for grain refinement later (using makemap)\n", "\n", - "cf_strong_allrings = utils.selectpeaks(cf_3d, frac=0.95, dsmax=cf_3d.ds.max(), doplot=0.05, dstol=0.01)\n", + "cf_strong_allrings = select_ring_peaks_by_intensity(cf_3d, frac=0.95, dsmax=cf_3d.ds.max(), doplot=0.05, dstol=0.01)\n", "print(f\"Got {cf_strong_allrings.nrows} strong peaks for makemap\")\n", "cf_strong_allrings_path = f'{sample}_{dataset}_3d_peaks_strong_all_rings.flt'\n", "cf_strong_allrings.writefile(cf_strong_allrings_path)" @@ -307,6 +308,25 @@ 
"ImageD11.indexing.loglevel = 3" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# let's plot the assigned peaks\n", + "\n", + "fig, ax = plt.subplots()\n", + "\n", + "# indexer.ra is the ring assignments\n", + "\n", + "ax.scatter(cf_strong.ds, cf_strong.eta, c=indexer.ra, cmap='tab20', s=1)\n", + "ax.set_xlabel(\"d-star\")\n", + "ax.set_ylabel(\"eta\")\n", + "\n", + "plt.show()" + ] + }, { "cell_type": "code", "execution_count": null, @@ -396,7 +416,8 @@ "outputs": [], "source": [ "omegas_sorted = np.sort(ds.omega)[0]\n", - "omega_slop = np.round(np.diff(omegas_sorted).mean(), 3)\n", + "omega_step = np.round(np.diff(omegas_sorted).mean(), 3)\n", + "omega_slop = omega_step/2\n", "\n", "makemap_hkl_tol_seq = [0.05, 0.025, 0.01]" ] @@ -409,12 +430,14 @@ }, "outputs": [], "source": [ + "symmetry = \"cubic\"\n", + "\n", "for inc, makemap_tol in enumerate(makemap_hkl_tol_seq):\n", " print(f\"Running makemap {inc+1}/{len(makemap_hkl_tol_seq)}\")\n", " if inc == 0: # ubi into map\n", - " makemap_output = !makemap.py -p {ds.parfile} -u {tmp_ubi_path} -U {tmp_map_path} -f {cf_strong_allrings_path} -F {unindexed_flt_path} -s cubic -t {makemap_hkl_tol_seq[inc]} --omega_slop={omega_slop} --no_sort\n", + " makemap_output = !makemap.py -p {ds.parfile} -u {tmp_ubi_path} -U {tmp_map_path} -f {cf_strong_allrings_path} -F {unindexed_flt_path} -s {symmetry} -t {makemap_hkl_tol_seq[inc]} --omega_slop={omega_slop} --no_sort\n", " else: # map into map\n", - " makemap_output = !makemap.py -p {ds.parfile} -u {tmp_map_path} -U {tmp_map_path} -f {cf_strong_allrings_path} -F {unindexed_flt_path} -s cubic -t {makemap_hkl_tol_seq[inc]} --omega_slop={omega_slop} --no_sort" + " makemap_output = !makemap.py -p {ds.parfile} -u {tmp_map_path} -U {tmp_map_path} -f {cf_strong_allrings_path} -F {unindexed_flt_path} -s {symmetry} -t {makemap_hkl_tol_seq[inc]} --omega_slop={omega_slop} --no_sort" ] }, { diff --git a/ImageD11/nbGui/3DXRD/2_3DXRD_index.ipynb b/ImageD11/nbGui/3DXRD/2_3DXRD_index.ipynb index 65f25203..5c1acd0e 100755 --- a/ImageD11/nbGui/3DXRD/2_3DXRD_index.ipynb +++ b/ImageD11/nbGui/3DXRD/2_3DXRD_index.ipynb @@ -305,6 +305,25 @@ "ImageD11.indexing.loglevel = 3" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "# let's plot the assigned peaks\n", + "\n", + "fig, ax = plt.subplots()\n", + "\n", + "# indexer.ra is the ring assignments\n", + "\n", + "ax.scatter(cf_strong.ds, cf_strong.eta, c=indexer.ra, cmap='tab20', s=1)\n", + "ax.set_xlabel(\"d-star\")\n", + "ax.set_ylabel(\"eta\")\n", + "\n", + "plt.show()" + ] + }, { "cell_type": "code", "execution_count": null, @@ -424,7 +443,8 @@ "outputs": [], "source": [ "omegas_sorted = np.sort(ds.omega)[0]\n", - "omega_slop = np.round(np.diff(omegas_sorted).mean(), 3)\n", + "omega_step = np.round(np.diff(omegas_sorted).mean(), 3)\n", + "omega_slop = omega_step/2\n", "\n", "makemap_hkl_tol_seq = [0.05, 0.025, 0.01]" ] @@ -437,12 +457,14 @@ }, "outputs": [], "source": [ + "symmetry = \"cubic\"\n", + "\n", "for inc, makemap_tol in enumerate(makemap_hkl_tol_seq):\n", " print(f\"Running makemap {inc+1}/{len(makemap_hkl_tol_seq)}\")\n", " if inc == 0: # ubi into map\n", - " makemap_output = !makemap.py -p {ds.parfile} -u {tmp_ubi_path} -U {tmp_map_path} -f {cf_strong_allrings_path} -F {unindexed_flt_path} -s cubic -t {makemap_hkl_tol_seq[inc]} --omega_slop={omega_slop} --no_sort\n", + " makemap_output = !makemap.py -p {ds.parfile} -u {tmp_ubi_path} -U 
{tmp_map_path} -f {cf_strong_allrings_path} -F {unindexed_flt_path} -s {symmetry} -t {makemap_hkl_tol_seq[inc]} --omega_slop={omega_slop} --no_sort\n", " else: # map into map\n", - " makemap_output = !makemap.py -p {ds.parfile} -u {tmp_map_path} -U {tmp_map_path} -f {cf_strong_allrings_path} -F {unindexed_flt_path} -s cubic -t {makemap_hkl_tol_seq[inc]} --omega_slop={omega_slop} --no_sort" + " makemap_output = !makemap.py -p {ds.parfile} -u {tmp_map_path} -U {tmp_map_path} -f {cf_strong_allrings_path} -F {unindexed_flt_path} -s {symmetry} -t {makemap_hkl_tol_seq[inc]} --omega_slop={omega_slop} --no_sort" ] }, { diff --git a/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map.ipynb b/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map.ipynb index a47a566b..1669c096 100644 --- a/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map.ipynb +++ b/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map.ipynb @@ -630,8 +630,8 @@ "outputs": [], "source": [ "fig, a = plt.subplots(1,2,figsize=(10,5))\n", - "rec = a[0].imshow(grainsinos[8].recons[\"iradon\"], vmin=0, origin=\"lower\")\n", - "sin = a[1].imshow(grainsinos[8].ssino, aspect='auto')\n", + "rec = a[0].imshow(grainsinos[0].recons[\"iradon\"], vmin=0, origin=\"lower\")\n", + "sin = a[1].imshow(grainsinos[0].ssino, aspect='auto')\n", "\n", "# Function to update the displayed image based on the selected frame\n", "def update_frame(i):\n", @@ -1044,6 +1044,9 @@ " print(\"Whole sample mask recon\")\n", " whole_sample_sino, xedges, yedges = np.histogram2d(cf_4d.dty, cf_4d.omega, bins=[ds.ybinedges, ds.obinedges])\n", " whole_sample_recon = run_iradon(whole_sample_sino, ds.obincens, pad, shift, workers=nthreads, apply_halfmask=is_half_scan, mask_central_zingers=is_half_scan)\n", + " \n", + " recon_man_mask = apply_manual_mask(whole_sample_recon)\n", + " \n", " if manual_threshold is None:\n", " thresh = threshold_otsu(recon_man_mask)\n", " else:\n", diff --git a/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map_minor_phase.ipynb b/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map_minor_phase.ipynb index 5f5e8015..d371297d 100755 --- a/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map_minor_phase.ipynb +++ b/ImageD11/nbGui/S3DXRD/2_S3DXRD_sinograms_map_minor_phase.ipynb @@ -539,8 +539,8 @@ "outputs": [], "source": [ "fig, a = plt.subplots(1,2,figsize=(10,5))\n", - "rec = a[0].imshow(grainsinos[8].recons[\"iradon\"], vmin=0, origin=\"lower\")\n", - "sin = a[1].imshow(grainsinos[8].ssino, aspect='auto')\n", + "rec = a[0].imshow(grainsinos[0].recons[\"iradon\"], vmin=0, origin=\"lower\")\n", + "sin = a[1].imshow(grainsinos[0].ssino, aspect='auto')\n", "\n", "# Function to update the displayed image based on the selected frame\n", "def update_frame(i):\n", From 07a0f01673310f2f84dceff9644d4ec89ed1a462 Mon Sep 17 00:00:00 2001 From: James Ball Date: Tue, 21 May 2024 16:08:56 +0200 Subject: [PATCH 3/6] Improve ring current correction --- ImageD11/sinograms/sinogram.py | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/ImageD11/sinograms/sinogram.py b/ImageD11/sinograms/sinogram.py index 900d786b..66ebd9e9 100644 --- a/ImageD11/sinograms/sinogram.py +++ b/ImageD11/sinograms/sinogram.py @@ -180,16 +180,22 @@ def correct_halfmask(self): """Applies halfmask correction to sinogram""" self.ssino = ImageD11.sinograms.roi_iradon.apply_halfmask_to_sino(self.ssino) - def correct_ring_current(self, is_half_scan=False): + def correct_ring_current(self, is_half_scan=False, min_ring_current_frac=0.5): """Corrects each row of the sinogram to the ring current of the corresponding scan""" 
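+        # min_ring_current_frac sets the fraction of the maximum ring current below
+        # which a row is treated as unreliable (e.g. recorded during a top-up) and is
+        # divided by the mean current instead of its own anomalously low value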
+ + # ignore ring current values below min_ring_current + mean_ring_current = np.mean(self.ds.ring_currents_per_scan_scaled) + min_ring_current = min_ring_current_frac * np.max(self.ds.ring_currents_per_scan_scaled) + ring_current_to_use = np.where(self.ds.ring_currents_per_scan_scaled < min_ring_current, mean_ring_current, self.ds.ring_currents_per_scan_scaled) + if is_half_scan: - correction = self.ds.ring_currents_per_scan_scaled - addition_length = len(self.ds.ybincens) - len(self.ds.ring_currents_per_scan_scaled) + correction = ring_current_to_use + addition_length = len(self.ds.ybincens) - len(ring_current_to_use) correction_halfmask_addition = np.zeros(addition_length) correction_halfmask_addition.fill(correction.max()) correction = np.concatenate((correction, correction_halfmask_addition)) else: - correction = self.ds.ring_currents_per_scan_scaled + correction = ring_current_to_use self.ssino = self.ssino / correction[:, None] def update_recon_parameters(self, pad=None, shift=None, mask=None, niter=None, y0=None): From 5b61a7641b25854cb71ba69b4926f81bec262e31 Mon Sep 17 00:00:00 2001 From: Thomas VINCENT Date: Tue, 28 May 2024 08:58:58 +0200 Subject: [PATCH 4/6] Make cibuildwheel verbose --- .github/workflows/release.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 7177bb7c..26d5e526 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -42,6 +42,7 @@ jobs: - name: Build wheels uses: pypa/cibuildwheel@v2.16.5 env: + CIBW_BUILD_VERBOSITY: 1 CIBW_PROJECT_REQUIRES_PYTHON: ">=3.7" CIBW_BUILD: cp37-* cp38-* cp39-* cp310-* cp311-* cp312-* # Do not build for pypy, muslinux and python3.12 on ppc64le From c07ab4de3cc1d311f83ecc8e4f3ae4706510b371 Mon Sep 17 00:00:00 2001 From: Thomas VINCENT Date: Tue, 28 May 2024 11:09:25 +0200 Subject: [PATCH 5/6] Change tarball name to lower case --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 26d5e526..0083369e 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -87,7 +87,7 @@ jobs: - name: Install and test sdist run: | - pip install --pre dist/ImageD11*.tar.gz + pip install --pre dist/imaged11*.tar.gz pytest test - uses: actions/upload-artifact@v4 From fd4c938e89224dd304d0835526de095fbbcdd227 Mon Sep 17 00:00:00 2001 From: Thomas VINCENT Date: Tue, 28 May 2024 12:16:39 +0200 Subject: [PATCH 6/6] Remove --pre from test install --- .github/workflows/release.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0083369e..d9b83bd1 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -87,7 +87,7 @@ jobs: - name: Install and test sdist run: | - pip install --pre dist/imaged11*.tar.gz + pip install dist/imaged11*.tar.gz pytest test - uses: actions/upload-artifact@v4
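For context, a minimal usage sketch of the ASTRA reconstruction path added in patch 1, assuming a GrainSinogram `gs` whose sorted sinogram and angles have already been built, on a machine where astra-toolbox (with CUDA) is importable; the astra_method and niter keywords follow the run_astra signature above, while the pad and shift values are purely illustrative:

    # illustrative sketch: reconstruct one grain with the ASTRA backend
    import matplotlib.pyplot as plt

    gs.update_recon_parameters(pad=50, shift=0)                     # stored as recon_pad / recon_shift
    gs.recon(method="astra", astra_method="SIRT_CUDA", niter=100)   # extra kwargs are forwarded to run_astra

    fig, ax = plt.subplots()
    ax.imshow(gs.recons["astra"], vmin=0, origin="lower")
    plt.show()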