Deep Kernel Learning driven piezoresponse spectroscopy#
Yongtao Liu, youngtaoliu@gmail.com
Feb 2024
Install and Import#
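The packages used throughout this notebook can be installed ahead of time. A minimal install cell is sketched below; the package names are inferred from the imports in this notebook, and versions are not specified by the original.

# Hypothetical install cell; package names are inferred from the imports in this notebook
%pip install numpy scipy h5py matplotlib tqdm sidpy pyNSID mlsocket pywin32 atomai
# the GPU server additionally needs: torch gpytorch botorch atomai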
import os
import time

import win32com.client
import numpy as np
import h5py
import sidpy
import pyNSID
import matplotlib.pyplot as plt
from tqdm import tqdm
from scipy.signal import find_peaks
from mlsocket import MLSocket

# import Acquisition_v0_5.py
from Acquisition_v0_5 import Acquisition  # keep Acquisition_v0_5.py in the same directory
Start BEPyAE.exe and set VI#
Start BEPyAE.exe
Set the VI of BEPyAE; if this version includes PyScanner, also set the VIs for PyScanner
newexp = Acquisition(exe_path = r"C:\BEPyAE 060123 01\BEPyAE.exe") # exe_path is the path to BEPyAE.exe
Initialize Igor AR18#
Set offline development
Build a connection between BEPyAE and AR18
Get parameters in AR18
newexp.init_BEPyAE(offline_development = True) # set offline_development=True if doing offline development
# executing this will also initialize AR18
Set tip parameters#
Set the setpoint and tip location
newexp.tip_control(tip_parms_dict = {"set_point_V_00": 1, "next_x_pos_00": -0.5, "next_y_pos_01": 0.5},
                   do_move_tip = True,
                   do_set_setpoint = True) # Executing this code will set the setpoint to 1 V
                                           # and move the tip to location [-0.5, 0.5]
Setpoint is: 1.0
Tip parameters are: (-0.5, 0.5, 0.5)
Please reset if some parameters are incorrect
Set IO#
This defines IO parameters, such as the AFM platform (AR18), amplifiers, channel data types, etc.
newexp.define_io_cluster(IO_cluster_parms_dict = {"analog_output_amplifier_06": 1,
"channel_01_type_07": 1,
"channel_02_type_08": 2,"channel_03_type_09": 3,})
IO control parameters are: ('0 Cypher AR18', '6124', 4000000.0, 10.0, 10.0, 'AC and DC on AO0', 10.0, 'topography', 'current', 'aux', 'external')
Please reset if some parameters are incorrect
Set BE pulse parameters#
# set BE parameters
newexp.define_be_parms(be_parms_dict = {"center_frequency_Hz_00": 335, "band_width_Hz_01": 100,
"amplitude_V_02": 1, "phase_variation_03": 1,
"repeats_04": 4, "req_pulse_duration_s_05": 4,
"auto_smooth_ring_06": 1},
do_create_be_waveform = True)
BE parameters are: (335000.0, 100000.0, 1.0, 1.0, 4, 0.004, 1, 3352.2952763920002, 0.12159459061880915)
Please reset if some parameters are incorrect
(335000.0, 100000.0, 1.0, 1.0, 4, 0.004, 1, 3352.2952763920002, 0.12159459061880915)
BE Line scan to test BE parameters#
This is a single BE line scan. It returns 5 datasets: the quick-fit result, the complex spectra, and 3 data channels.
# Do a single line scan
qk_fit, com_spec, chn1, chn2, chn3 = newexp.do_line_scan(line_scan_parms_dict = {"num_BE_pulses_01": 32,
"start_x_pos_00": -0.5, "start_y_pos_01": 0,
"stop_x_pos_02": 0.5, "stop_y_pos_03": 0},
upload_to_daq = True, do_line_scan = True)
voltage offset and number of BE pulse are: (0.0, 32)
line scan start and end positions: (-0.5, 0.0, 0.5, 0.0)
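To sanity-check the BE parameters, the quick-fit result of the test line scan can be plotted. Below is a minimal sketch; it assumes qk_fit is array-like with one row per BE pulse and amplitude, frequency, Q, and phase columns, which may differ in your BEPyAE version.

# Hypothetical quick-fit plot; the column layout of qk_fit is an assumption
qk_fit_arr = np.asarray(qk_fit)
fig, axes = plt.subplots(1, 4, figsize = (16, 3), dpi = 100)
for ax, col, label in zip(axes, range(4), ["amplitude", "frequency", "Q", "phase"]):
    ax.plot(qk_fit_arr[:, col], ".-")
    ax.set_xlabel("BE pulse index")
    ax.set_title(label)
plt.tight_layout()
plt.show()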
Experiment Starts#
Run on local PC
Run the code below on the microscope computer.
Prior to the experiment, set a directory to save data#
os.chdir("/content/save directory/")  # change this to an existing data directory on the microscope PC
Step 1. Perform a BEPFM measurement#
dset_pfm, dset_chns, dset_cs = newexp.raster_scan(raster_parms_dict = {"scan_pixel": 256, "scan_x_start": -1.0,
"scan_y_start": -1.0,"scan_x_stop": 1.0,
"scan_y_stop": 1.0}, file_name = "BEPFM")
f, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(1, 6, figsize = (30, 5), dpi = 100)
ax1.imshow(dset_pfm[:,:,0])
ax2.imshow(dset_pfm[:,:,1])
ax3.imshow(dset_pfm[:,:,2])
ax4.imshow(dset_pfm[:,:,3])
ax5.imshow(dset_chns[0,:,:])
ax6.imshow(dset_chns[1,:,:])
plt.show()
20 locations are ready for experiments
Step 2. Prepare structure patches#
# normalize structure data
norm_ = lambda x: (x - x.min()) / (x.max() - x.min())
struc_img = np.asarray(dset_pfm[:,:,0]) # set structure image
struc_img = norm_(struc_img) # normalize
np.save('structure_image.npy', struc_img) # send this to GPU server later
print ("Structure image shape:", struc_img.shape)
# prepare structure image patches
from atomai import utils  # atomai provides the patch-extraction utilities used below

coordinates = utils.get_coord_grid(struc_img, 1)
# patch size
window_size = 20
pix = struc_img.shape[1] - window_size + 1
# extract a subimage for each point on the grid
features_all, coords, _ = utils.extract_subimages(struc_img, coordinates, window_size)
features_all = features_all[:,:,:,0]
# indices = coords.reshape(pix,pix,2)
indices = coords
print("Patch shape:", features_all.shape)
print("Indices list shape: ", indices.shape)
# plot the structure image and an example patch
_, (ax1, ax2) = plt.subplots(1, 2, dpi = 100)
k = 20
ax1.imshow(struc_img, origin = "lower")
ax1.scatter(indices.reshape(-1, 2)[k, 1], indices.reshape(-1, 2)[k, 0], c = 'r')
ax2.imshow(features_all[k], origin = "lower")
plt.show()
Step 3. Send structure_image.npy to the GPU server via SFTP#
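The transfer can be done with any SFTP client. Below is a minimal sketch using paramiko; the server address, username, password, and remote path are placeholders, not from the original.

import paramiko

# Hypothetical connection details; replace with your GPU server's credentials and paths
client = paramiko.SSHClient()
client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
client.connect("gpu-server.example.org", username="user", password="password")

sftp = client.open_sftp()
sftp.put("structure_image.npy", "/home/user/dkl_beps/structure_image.npy")  # local -> remote
sftp.close()
client.close()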
Run on GPU server
Run the code below on the GPU server.
Step 4. Install, import, and define helper functions#
Please run this code @ GPU server
-------------------------------------------------------------------
import time
import os
import numpy as np
import pylab as pl
import torch
import gpytorch
import botorch
import atomai as aoi
from atomai import utils
# from scipy.signal import find_peaks
# from sklearn.model_selection import train_test_split
from typing import Union, Type
from mlsocket import MLSocket
Please run this code @ GPU server
-------------------------------------------------------------------
############---DKL Acquisition function---###########
def EI(model: Type[aoi.models.dklGPR], X: Union[np.ndarray, torch.Tensor],
best_f: Union[float, torch.Tensor], xi: Union[float, torch.Tensor] = 0.01,
batch_size: int = 100) -> np.ndarray:
"""
Expected Improvement
"""
tt = torch.tensor
types = (np.ndarray, np.float32, np.float64, float)
tor = lambda a: tt(a) if isinstance(a, types) else a
device=model.device
dtype = model.dtype
X, best_f, xi = tor(X), tor(best_f), tor(xi)
mean, var = model.predict(X.to(dtype).to(device), batch_size=batch_size)
mean, var = tor(mean), tor(var) # have to translate them back to torch tensors
sigma = var.sqrt()
u = (mean - best_f.expand_as(mean) - xi.expand_as(mean)) / sigma
normal = torch.distributions.Normal(torch.zeros_like(u), torch.ones_like(u))
ucdf = normal.cdf(u)
updf = torch.exp(normal.log_prob(u))
obj = sigma * (updf + u * ucdf)
return obj.detach().cpu().numpy()
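The exploration loop in Step 8 below selects the next location by maximum predictive variance; the EI function above is an alternative acquisition. A minimal sketch of swapping it in (variable names follow Step 8, where dklgp, X_train, X_test, and xi are defined, mirroring the commented-out lines there):

# Hypothetical EI-based selection inside the Step 8 loop
best_f = torch.tensor(dklgp.predict(X_train)[0].max(), device=dklgp.device)
obj = EI(dklgp, X_test, best_f, xi, batch_size = 2000)
next_point_idx = obj.argmax()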
Step 5. Prepare image patches#
Please run this code @ GPU server
-------------------------------------------------------------------
# load the structure image sent from the microscope PC
struc_img = np.load("structure_image.npy")

# prepare structure image patches
coordinates = utils.get_coord_grid(struc_img, 1)
# patch size
window_size = 20
pix = struc_img.shape[1] - window_size + 1
# extract a subimage for each point on the grid
features_all, coords, _ = utils.extract_subimages(struc_img, coordinates, window_size)
features_all = features_all[:,:,:,0]
# indices = coords.reshape(pix,pix,2)
indices = coords
print("Patch shape:", features_all.shape)
print("Indices list shape: ", indices.shape)
Run on local PC
Run the code below on the microscope computer.
Step 6. Do first BEPS at random location#
# the first BEPS measurement is performed at a random location
np.random.seed(0)
index = np.random.randint(len(indices))  # random location
print("First location index: ", index)

# Do BEPS
newexp.do_beps(indices[index])
print("measurement done")

# Read data and calculate the scalarizer (experiment-specific; see the sketch below)
new_spec = ...        # the measured BEPS spectrum read back from the saved data
new_scalarizer = ...  # a scalar derived from new_spec that DKL will optimize

# Define a list to save scalarizers
y_train_raw = np.asarray([])
y_train_raw = np.append(y_train_raw, new_scalarizer)
print('Now, the y_train_raw is {}'.format(y_train_raw))

# Normalize y_train
y_train_normalize = np.asarray([0.5])  # since there is just a single value, we set it to 0.5
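The scalarizer (the quantity DKL optimizes) is left to the user above. A minimal sketch is given below; it assumes the BEPS measurement yields a piezoresponse hysteresis loop as arrays dc_voltage, amplitude, and phase (hypothetical names; how they are read back depends on the Acquisition version), and uses the loop area as the scalarizer.

# Hypothetical scalarizer: area enclosed by the piezoresponse hysteresis loop.
# dc_voltage, amplitude, and phase are placeholders for the arrays produced by do_beps.
response = amplitude * np.cos(phase)                 # projected piezoresponse
loop_area = np.abs(np.trapz(response, dc_voltage))   # enclosed area as a single scalar
new_spec = response
new_scalarizer = loop_area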
Step 7. Request connection to GPU server and start DKL exploration#
# Define exploration steps
exploration_step = 200

from IPython.display import clear_output  # used to refresh the figure each step

HOST = 'localhost'  # address of the GPU server (or an SSH tunnel to it)
PORT = 9000         # must match the port the GPU server binds in Step 8

with MLSocket() as s:
    s.connect((HOST, PORT))
    for i in range(exploration_step):
        print("##########----step {}/{}----##########".format(i+1, exploration_step))
        starttime = time.time()

        # Send the latest measured value to the GPU server
        new_data = np.asarray(y_train_normalize[-1])
        s.send(new_data)
        time.sleep(0.01)

        # Listen for the next location
        next_location = s.recv(920)
        time.sleep(0.01)
        print("predicted point ready", next_location)

        ###### Do BEPS measurement at the predicted location ########
        newexp.do_beps(next_location)
        print("measurement done")

        # Read data and calculate the scalarizer (experiment-specific; see the sketch in Step 6)
        new_spec = ...        # the measured BEPS spectrum read back from the saved data
        new_scalarizer = ...  # a scalar derived from new_spec that DKL will optimize
        y_train_raw = np.append(y_train_raw, new_scalarizer)
        print('Now, the y_train_raw is {}'.format(y_train_raw))

        # Normalize y_train
        y_train_normalize = (y_train_raw - y_train_raw.min())/y_train_raw.ptp()

        # Plot figure
        clear_output(wait=True)
        plt.figure()
        plt.imshow(struc_img, cmap = 'gray')
        plt.scatter(next_location[1], next_location[0], c = 'r')
        plt.show()
        print("time in this step: ", time.time()-starttime)
Run on GPU server
Run the code below on the GPU server.
Step 8. Connect to local PC and start DKL exploration#
Please run this code @ GPU server
-------------------------------------------------------------------
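The loop below assumes the feature matrix and the train/candidate splits have already been initialized from the Step 5 patches. A minimal sketch of that initialization is given here; this cell is an assumption, not part of the original, with array shapes chosen to match how the variables are used in the loop.

# Hypothetical initialization of the DKL training/candidate sets from the patches
n, d1, d2 = features_all.shape
X = features_all.reshape(n, d1 * d2)           # flatten each patch into a feature vector

X_test = X.copy()                              # candidate (unmeasured) features
indices_test = indices.reshape(-1, 2).copy()   # candidate pixel coordinates

X_train = np.zeros((1, X.shape[-1]))           # filled with the first measured point at step 0
indices_train = np.zeros((1, 2))
X_med = np.zeros((1, X.shape[-1]))             # single-row buffers used to append new points
indices_med = np.zeros((1, 2))
y_train = np.asarray([])                       # measured scalarizers arrive over the socket

savedir = "exp_record"                         # where per-step records are written
os.makedirs(savedir, exist_ok=True)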
data_dim = X_train.shape[-1]
exploration_step = 200
xi = 0.01

HOST = ''      # listen on all interfaces
PORT = 9000    # must match the port the microscope PC connects to in Step 7

with MLSocket() as s:
    s.bind((HOST, PORT))
    print("binding......\nplease connect from the other end")
    s.listen()
    conn, address = s.accept()
    with conn:
        print('DKL starts')
        for step in range(exploration_step):
            print("##########----step {}/{}----##########".format(step+1, exploration_step))
            if step == 0:
                np.random.seed(0)
                index = np.random.randint(len(indices))  # may need to set this manually to match the index used in Step 6
                print("First index: ", index)
                # Update train and test data
                X_train[0,] = X[index,]
                X_test = np.delete(X_test, index, 0)
                indices_train[0,] = indices_test[index,]
                indices_test = np.delete(indices_test, index, 0)
                # Listen to the client for the measurement result
                measured_point = conn.recv(920)
                print("Received new point data")
                # update training data
                y_train = np.append(y_train, measured_point)
            else:
                # listen to the client for the measurement result
                measured_point = conn.recv(920)
                print("received new point data")
                # update training data
                y_train = np.append(y_train, measured_point)
                X_med[0,] = X_test[next_point_idx,]
                X_train = np.append(X_train, X_med, axis = 0)
                X_test = np.delete(X_test, next_point_idx, 0)
                indices_med[0,] = indices_test[next_point_idx]
                indices_train = np.append(indices_train, indices_med, axis = 0)
                indices_test = np.delete(indices_test, next_point_idx, 0)

            dklgp = aoi.models.dklGPR(data_dim, embedim=2, precision="single")
            dklgp.fit(X_train, y_train, training_cycles=200)

            # Compute acquisition function
            # best_f = torch.tensor(dklgp.predict(X_train)[0].max(), device=dklgp.device)
            # obj_mean = EI(dklgp, X_test, best_f, xi, batch_size = 2000)

            # Select the next point to "measure" (maximum predictive variance)
            _, var_ = dklgp.predict(X_test, batch_size = len(X_test))
            next_point_idx = var_.argmax()
            next_points = np.asarray(indices_test[next_point_idx])

            # send the next point to the client
            conn.send(next_points)
            print("Send next point index and next point: ", next_point_idx, next_points)

            # save step record
            np.savez(os.path.join(savedir, "record{}.npz".format(step)), x_train = X_train, y_train = y_train,
                     indice_train = indices_train, indice_test = indices_test, var = var_, nextpoint = next_points)
            # np.savez(("/exp_record/record{}.npz".format(step)), indicestest=indices_test,
            #          objmean=obj_mean, nextpoint=next_points)
Run on local PC
Run the code below on the microscope computer.
Step 9. Do a BEPFM scan over the whole experiment area#
dset_pfm, dset_chns, dset_cs = newexp.raster_scan(raster_parms_dict = {"scan_pixel": 256, "scan_x_start": -1.0,
"scan_y_start": -1.0,"scan_x_stop": 1.0,
"scan_y_stop": 1.0}, file_name = "pfm_whole")
f, (ax1, ax2, ax3, ax4, ax5, ax6) = plt.subplots(1, 6, figsize = (30, 5), dpi = 100)
ax1.imshow(dset_pfm[:,:,0])
ax2.imshow(dset_pfm[:,:,1])
ax3.imshow(dset_pfm[:,:,2])
ax4.imshow(dset_pfm[:,:,3])
ax5.imshow(dset_chns[0,:,:])
ax6.imshow(dset_chns[1,:,:])
plt.show()
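To relate the DKL-visited locations to this final BEPFM image, the per-step records saved in Step 8 can be overlaid on one of the channels. A minimal sketch is given below; it assumes the record files have been copied back to the microscope PC into an "exp_record" folder, and that the full 200-step run completed (so the last file is record199.npz); adjust the path and index to your run.

# Hypothetical overlay of visited locations on the final BEPFM amplitude channel
record = np.load(os.path.join("exp_record", "record199.npz"))  # last step's record (index is an assumption)
visited = record["indice_train"]                               # (N, 2) pixel coordinates of measured points

plt.figure(dpi = 100)
plt.imshow(dset_pfm[:, :, 0], origin = "lower", cmap = "gray")
plt.scatter(visited[:, 1], visited[:, 0], s = 10, c = 'r')
plt.show()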