# atom-predict/egnn_v2/stastic_center_v5.py

import numpy as np
import matplotlib.pyplot as plt
from egnn_core.data import load_data_v5
import os
# Traverse the table of points according to their connections:
# Step 1: walk over the source point indices in the first column of edge_index.T, i.e. [:, 0].
# Step 2: look up the label of each index and check whether it is "normal" (0); if so, go on
#         to the next step, otherwise skip it. This step excludes non-normal atoms.
# Step 3: check the neighbors of each normal atom:
#         take the second column of edge_index.T, i.e. [:, 1], to get the neighbors,
#         then count how many of those neighbors are line defects (label 2).
# Note: once a point has been processed it must be removed from the table permanently to
#       avoid double counting (a sketch of this procedure follows below).
# edge_index = np.array([[1, 3], [4, 5], [1, 7], [1, 10]])
# labels = np.array([0,0,0,2,0,2,0,2,0,0,2])
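# A minimal sketch of the neighbor-counting procedure described above; it is not used
# elsewhere in this script. It assumes edge_index is a (2, E) array whose transpose holds
# one (source, neighbor) pair per row, and that labels[i] is the class of point i
# (0 = normal atom, 2 = line defect). The function name is illustrative only.
def count_line_defect_neighbors(edge_index, labels):
    pairs = edge_index.T                      # one (source, neighbor) pair per row
    counts = {}
    visited = set()
    for src, _ in pairs:
        # Step 2: only consider normal atoms, and count each source point once
        if labels[src] != 0 or src in visited:
            continue
        # Step 3: gather all neighbors of this point and count the line defects (label 2)
        neighbors = pairs[pairs[:, 0] == src][:, 1]
        counts[int(src)] = int(np.sum(labels[neighbors] == 2))
        visited.add(src)                      # exclude this point from further counting
    return counts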
# Folder containing the JSON files to process
folder_path = '/home/gao/mouclear/analyze_center/new_v2'
img_size = 2048
# target_size = [150,200]
target_size = [165,185]
def calculate_entropy(image):
    # Count how often each gray level occurs
    hist, _ = np.histogram(image, bins=range(256), range=(0, 255))
    # Convert counts to probabilities
    probabilities = hist / np.sum(hist)
    # Shannon entropy over the non-zero probabilities
    entropy = -np.sum(probabilities[probabilities > 0] * np.log2(probabilities[probabilities > 0]))
    return entropy
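# Quick sanity check (illustrative values): a constant image occupies a single gray-level
# bin, so its entropy is 0; an image split evenly between two gray levels has
# p = [0.5, 0.5] and entropy -2 * 0.5 * log2(0.5) = 1 bit.
#   calculate_entropy(np.zeros((8, 8)))                -> 0.0
#   calculate_entropy(np.array([[0, 255], [0, 255]]))  -> 1.0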
def process_json_file(json_file):
    points, edge_index, labels, lights = load_data_v5(json_file)  # assumed to be your data loading function
    center_index = np.where(labels == 4)
    graph_index = np.where(labels == 3)
    combined_index = np.array(center_index[0].tolist() + graph_index[0].tolist())
    selected_point = []
    selected_light = []
    for i in combined_index:
        selected_point.append(points[i])
        selected_light.append(lights[i])
    selected_point = np.array(selected_point)
    selected_light = np.array(selected_light)
    bg = np.zeros((img_size, img_size))
    for light, point in zip(selected_light, selected_point):
        img_h, img_w = bg.shape  # height and width of the background image
        h, w = point
        ps = 1  # light is a 3x3 patch, so the half patch size ps is 1
        hs = np.clip(h - ps, 0, img_h - ps - 1)
        ws = np.clip(w - ps, 0, img_w - ps - 1)
        he = hs + 2 * ps + 1  # end row index of the 3x3 patch
        we = ws + 2 * ps + 1  # end column index of the 3x3 patch
        # Reshape light into a 3x3 two-dimensional array
        light_2d = light.reshape((3, 3))
        # Stamp the patch onto the background image
        bg[hs:he, ws:we] = light_2d
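        # Illustrative example (hypothetical numbers): for a point at (h, w) = (100, 200)
        # with ps = 1, the patch occupies rows 99:102 and columns 199:202, i.e. a 3x3 block
        # centered on the point; the np.clip above keeps the block inside the image bounds.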
    # After all light patches are drawn, locate the modified region
    modified_rows, modified_cols = np.where(bg != 0)
    # Minimum and maximum indices bound the modified region
    min_row, max_row = min(modified_rows), max(modified_rows)
    min_col, max_col = min(modified_cols), max(modified_cols)
    # Compute the crop margins needed to reach a uniform output size
    pad_height = max_row - min_row
    pad_width = max_col - min_col
    pad_top = (target_size[0] - pad_height) // 2
    pad_left = (target_size[1] - pad_width) // 2
    # Make sure the crop does not run past the original image
    pad_top = min(pad_top, img_size - max_row)
    pad_left = min(pad_left, img_size - max_col)
    # Crop the background to the modified region plus a margin, so every output has the same size
    cropped_bg = bg[
        min_row - pad_top:min_row - pad_top + target_size[0],
        min_col - pad_left:min_col - pad_left + target_size[1]
    ]
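    # Illustrative example (hypothetical numbers): if the modified region spans rows 900-1020
    # (max_row - min_row = 120) and target_size[0] = 165, then pad_top = (165 - 120) // 2 = 22
    # and the crop covers rows 878:1043, exactly 165 rows centered on the region.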
    # Alternative: crop to the modified region only, without enforcing a uniform size
    # cropped_bg = bg[min_row-1:max_row + 1, min_col-1:max_col + 1]
    # Plot the cropped background image
    plt.figure(figsize=(9, 9))
    plt.imshow(cropped_bg, cmap='gray', interpolation='nearest')  # nearest-neighbor interpolation preserves the pixels
    plt.axis('off')
    plt.tight_layout()
    json_filename = os.path.basename(json_file)
    json_filename_without_ext = os.path.splitext(json_filename)[0]
    image_filename = f"{json_filename_without_ext}.png"
    plt.savefig(image_filename, bbox_inches='tight', pad_inches=0)
    plt.close()  # close the figure to release resources
    # Entropy of the cropped image
    entropy = calculate_entropy(cropped_bg)
    print(f"The entropy of the image is: {entropy}")
    # Optionally show the image and its entropy again
    plt.imshow(cropped_bg, cmap='gray')
    # plt.title(f"Image Entropy: {entropy}")
    plt.axis('off')
    plt.show()
# Walk over every file in the folder
for filename in os.listdir(folder_path):
    if filename.endswith('.json'):
        print(filename)
        json_file = os.path.join(folder_path, filename)
        process_json_file(json_file)