
Commit

ADD: 3D scatter plots
matbun committed Nov 8, 2023
1 parent 94254cf commit 63a7aa0
Showing 6 changed files with 166 additions and 18 deletions.
80 changes: 76 additions & 4 deletions use-cases/3dgan/README.md
@@ -12,8 +12,22 @@ And select the "3DGAN" experiment.

The following is preliminary and not 100% ML/scientifically sound.

1. As inference dataset we can reuse training/validation dataset
2. As model, we can create a dummy version of it with:
1. As the inference dataset, we can reuse the training/validation dataset,
for instance the one downloaded from the Google Drive folder.
The inference dataset is a set of H5 files stored inside `exp_data`
sub-folders.

```text
├── exp_data
│ ├── data
| │ ├── file_0.h5
| │ ├── file_1.h5
...
| │ ├── file_N.h5
```

2. As the model, if a pre-trained checkpoint is not available,
we can create a dummy version of it with:

```python
import torch
@@ -23,8 +37,66 @@ The following is preliminary and not 100% ML/scientifically sound.
torch.save(my_gan, '3dgan-inference.pth')
```
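For reference, a minimal sketch of what such a dummy checkpoint creation
could look like. The class name `ThreeDGAN` and its no-argument constructor
are assumptions; use the actual Lightning module defined in `model.py`:

```python
# Sketch only: save an untrained model instance as a placeholder checkpoint.
# `ThreeDGAN` and its default constructor are assumptions, not the committed code.
import torch
from model import ThreeDGAN

my_gan = ThreeDGAN()
torch.save(my_gan, '3dgan-inference.pth')
```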

3. Run inference with the following command:
3. Run the inference command below. This will generate a "3dgan-generated-data"
folder containing the generated particle traces in the form of torch tensors
(.pth files) and 3D scatter plots (.jpg images).

```bash
TODO
python train.py -p inference-pipeline.yaml
```

Note that this is the same entry point used for training.

The inference execution will produce a folder called
"3dgan-generated-data" containing the
generated 3D particle trajectories (overwritten if already
present). Each generated 3D image is stored both as a
torch tensor (.pth) and as a 3D scatter plot (.jpg):

```text
├── 3dgan-generated-data
│ ├── data
| │ ├── energy=1.296749234199524&angle=1.272539496421814.pth
| │ ├── energy=1.296749234199524&angle=1.272539496421814.jpg
...
| │ ├── energy=1.664689540863037&angle=1.4906378984451294.pth
| │ ├── energy=1.664689540863037&angle=1.4906378984451294.jpg
```
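Each `.pth` file holds a raw generated volume and can be reloaded for further
analysis. A small sketch, assuming at least one sample was generated (the glob
pattern just picks the first `.pth` file under the output folder):

```python
import glob

import torch

# Reload one generated particle trace for inspection.
sample_path = glob.glob("3dgan-generated-data/**/*.pth", recursive=True)[0]
volume = torch.load(sample_path)
print(sample_path, volume.shape, volume.dtype)
```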

### Docker image

Build from the project root with:

```bash
# Local
docker buildx build -t itwinai-mnist-torch-inference -f use-cases/3dgan/Dockerfile .

# Ghcr.io
docker buildx build -t ghcr.io/intertwin-eu/itwinai-mnist-torch-inference:0.0.1 -f use-cases/mnist/torch/Dockerfile .
docker push ghcr.io/intertwin-eu/itwinai-mnist-torch-inference:0.0.1
```

Run from any location where a sample of MNIST jpg images is available
(in a folder called 'mnist-sample-data/'):

```text
├── $PWD
│ ├── mnist-sample-data
| │ ├── digit_0.jpg
| │ ├── digit_1.jpg
| │ ├── digit_2.jpg
...
| │ ├── digit_N.jpg
```

```bash
docker run -it --rm --name running-inference -v "$PWD":/usr/data ghcr.io/intertwin-eu/itwinai-mnist-torch-inference:0.0.1
```

This command will store the results in a folder called "mnist-predictions":

```text
├── $PWD
│ ├── mnist-predictions
| │ ├── predictions.csv
```
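To quickly inspect the output, a sketch assuming pandas is installed (the
column layout of `predictions.csv` is not documented here):

```python
import pandas as pd

# Peek at the first few rows of the predictions file.
preds = pd.read_csv("mnist-predictions/predictions.csv")
print(preds.head())
```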
2 changes: 1 addition & 1 deletion use-cases/3dgan/inference-pipeline.yaml
@@ -101,4 +101,4 @@ executor:

- class_path: saver.ParticleImagesSaver
init_args:
save_dir: 3dgan-generated
save_dir: 3dgan-generated-data
4 changes: 3 additions & 1 deletion use-cases/3dgan/model.py
@@ -761,7 +761,9 @@ def predict_step(
# print(f"Generator input: {generator_ip.shape}")
generated_images = self.generator(generator_ip)
# print(f"Generated batch size {generated_images.shape}")
return generated_images
return {'images': generated_images,
'energies': energy_batch,
'angles': ang_batch}

def configure_optimizers(self):
lr = self.lr
4 changes: 3 additions & 1 deletion use-cases/3dgan/requirements.txt
@@ -1,4 +1,6 @@
h5py>=3.7.0
google>=3.0.0
protobuf>=4.24.3
gdown>=4.7.1
gdown>=4.7.1
# plotly>=5.18.0
# kaleido>=0.2.1
71 changes: 68 additions & 3 deletions use-cases/3dgan/saver.py
@@ -4,6 +4,8 @@

import torch
from torch import Tensor
import matplotlib.pyplot as plt
import numpy as np

from itwinai.components import Saver

@@ -49,7 +51,70 @@ def save(self, generated_images: Dict[str, Tensor]) -> None:
shutil.rmtree(self.save_dir)
os.makedirs(self.save_dir)

# TODO: save as 3D plot image
# Save as torch tensor and jpg image
for img_id, img in generated_images.items():
img_path = os.path.join(self.save_dir, img_id + '.pth')
torch.save(img, img_path)
img_path = os.path.join(self.save_dir, img_id)
torch.save(img, img_path + '.pth')
self._save_image(img, img_id, img_path + '.jpg')

def _save_image(
self,
img: Tensor,
img_idx: str,
img_path: str,
center: bool = True
) -> None:
"""Converts a 3D tensor to a 3D scatter plot and saves it
to disk as jpg image.
"""
x_offset = img.shape[0] // 2 if center else 0
y_offset = img.shape[1] // 2 if center else 0
z_offset = img.shape[2] // 2 if center else 0

# Convert tensor dimension IDs to coordinates
x_coords = []
y_coords = []
z_coords = []
values = []

for x in range(img.shape[0]):
for y in range(img.shape[1]):
for z in range(img.shape[2]):
if img[x, y, z] > 0.0:
x_coords.append(x - x_offset)
y_coords.append(y - y_offset)
z_coords.append(z - z_offset)
values.append(img[x, y, z])

# import plotly.graph_objects as go
# normalize_intensity_by = 1
# trace = go.Scatter3d(
# x=x_coords,
# y=y_coords,
# z=z_coords,
# mode='markers',
# marker_symbol='square',
# marker_color=[
# f"rgba(0,0,255,{i*100//normalize_intensity_by/10})"
# for i in values],
# )
# fig = go.Figure()
# fig.add_trace(trace)
# fig.write_image(img_path)

values = np.array(values)
# 0-1 scaling
values = (values - values.min()) / (values.max() - values.min())
fig = plt.figure()
ax = fig.add_subplot(projection='3d')
ax.scatter(x_coords, y_coords, z_coords, alpha=values)
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')

# Extract energy and angle from idx
en, ang = img_idx.split('&')
en = en[7:]
ang = ang[6:]
ax.set_title(f"Energy: {en} - Angle: {ang}")
fig.savefig(img_path)
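The nested Python loops in `_save_image` scan every voxel; for larger volumes
an equivalent vectorized extraction could look like the sketch below. This is
an alternative, not the committed implementation, and it assumes `img` is a 3D
float tensor:

```python
import torch

def nonzero_voxels(img: torch.Tensor, center: bool = True):
    """Return x, y, z coordinates and intensities of voxels with positive value."""
    idx = (img > 0.0).nonzero(as_tuple=False)        # (N, 3) integer indices
    values = img[idx[:, 0], idx[:, 1], idx[:, 2]]    # (N,) voxel intensities
    if center:
        # Shift the origin to the center of the volume, as in _save_image.
        offsets = torch.tensor([s // 2 for s in img.shape], device=idx.device)
        idx = idx - offsets
    return idx[:, 0], idx[:, 1], idx[:, 2], values
```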
23 changes: 15 additions & 8 deletions use-cases/3dgan/trainer.py
@@ -130,26 +130,33 @@ def predict(

predictions = cli.trainer.predict(model, datamodule=datamodule)

predictions = [
self.transform_predictions(pred) for pred in predictions
]
# Transpose predictions into images, energies and angles
images = torch.cat(list(map(
lambda pred: self.transform_predictions(
pred['images']), predictions
)))
energies = torch.cat(list(map(
lambda pred: pred['energies'], predictions
)))
angles = torch.cat(list(map(
lambda pred: pred['angles'], predictions
)))

predictions_dict = dict()
for idx, generated_img in enumerate(torch.cat(predictions)):
for idx, (img, en, ang) in enumerate(zip(images, energies, angles)):
if (self.max_samples is not None
and idx >= self.max_samples):
break
predictions_dict[str(idx)] = generated_img
sample_key = f"energy={en.item()}&angle={ang.item()}"
predictions_dict[sample_key] = img

print(len(predictions_dict))
return predictions_dict

def transform_predictions(self, batch: Batch) -> Batch:
"""
Post-process the predictions of the torch model.
"""
# TODO: post-process predictions
return batch
return batch.squeeze(1)  # drop the singleton channel dimension from each sample

def execute(
self,
