23-05-24 | 2

Added pyplot to visualize how loss and acc change during training
This commit is contained in:
jung-geun
2023-05-24 15:39:17 +09:00
parent 27d40ab56c
commit 7a612e4ca7
4 changed files with 433 additions and 131 deletions

View File

@@ -12,7 +12,7 @@
"name": "stderr", "name": "stderr",
"output_type": "stream", "output_type": "stream",
"text": [ "text": [
"2023-05-24 12:58:36.275491: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n" "2023-05-24 15:37:52.889357: E tensorflow/stream_executor/cuda/cuda_blas.cc:2981] Unable to register cuBLAS factory: Attempting to register factory for plugin cuBLAS when one has already been registered\n"
] ]
}, },
{ {
@@ -84,7 +84,25 @@
"metadata": { "metadata": {
"tags": [] "tags": []
}, },
"outputs": [], "outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"x_train : (28, 28, 1) | y_train : ()\n",
"x_test : (28, 28, 1) | y_test : ()\n"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"init particles position: 100%|██████████| 30/30 [00:00<00:00, 36.65it/s]\n",
"init velocities: 100%|██████████| 30/30 [00:00<00:00, 681.12it/s]\n",
"Iter 0/20: 13%|#3 | 4/30 [00:04<00:20, 1.28it/s]"
]
}
],
"source": [ "source": [
"'''\n", "'''\n",
"optimizer parameter\n", "optimizer parameter\n",
@@ -98,52 +116,70 @@
"pso parameter\n", "pso parameter\n",
"'''\n", "'''\n",
"n_particles = 30\n", "n_particles = 30\n",
"maxiter = 50\n", "maxiter = 20\n",
"# epochs = 1\n", "# epochs = 1\n",
"w = 0.8\n", "w = 0.8\n",
"c0 = 0.6\n", "c0 = 0.6\n",
"c1 = 1.6\n", "c1 = 1.6\n",
"\n", "\n",
"def auto_tuning(n_particles=n_particles, maxiter=maxiter, c0=c0, c1=c1, w=w):\n",
" x_train, y_train, x_test, y_test = get_data()\n",
" model = make_model()\n",
"\n", "\n",
" loss = keras.losses.MeanSquaredError()\n", "x_train, y_train, x_test, y_test = get_data()\n",
" optimizer = keras.optimizers.SGD(lr=lr, momentum=momentun, decay=decay, nesterov=nestrov)\n", "model = make_model()\n",
"\n",
"loss = keras.losses.MeanSquaredError()\n",
"\n", "\n",
"\n", "\n",
" pso_m = PSO(model=model, loss_method=loss, n_particles=n_particles, x_train=x_train, y_train=y_train)\n", "pso_m = PSO(model=model, loss_method=loss, n_particles=n_particles)\n",
" # c0 : 지역 최적값 중요도\n", "# c0 : 지역 최적값 중요도\n",
" # c1 : 전역 최적값 중요도\n", "# c1 : 전역 최적값 중요도\n",
" # w : 관성 (현재 속도를 유지하는 정도)\n", "# w : 관성 (현재 속도를 유지하는 정도)\n",
" best_weights, score = pso_m.optimize(x_train, y_train, x_test, y_test, maxiter=maxiter, c0=c0, c1=c1, w=w)\n", "best_weights, score = pso_m.optimize(x_train, y_train, x_test, y_test, maxiter=maxiter, c0=c0, c1=c1, w=w)\n",
" model.set_weights(best_weights)\n", "model.set_weights(best_weights)\n",
"\n", "\n",
" score_ = model.evaluate(x_test, y_test, verbose=2)\n", "score_ = model.evaluate(x_test, y_test, verbose=2)\n",
" print(f\" Test loss: {score_}\")\n", "print(f\" Test loss: {score_}\")\n",
" score = round(score_[1]*100, 2)\n", "score = round(score_[1]*100, 2)\n",
"\n",
"day = date.today().strftime(\"%Y-%m-%d\")\n",
"\n",
"os.makedirs(f'./model', exist_ok=True)\n",
"model.save(f'./model/{day}_{score}_mnist.h5')\n",
"json_save = {\n",
" \"name\" : f\"{day}_{score}_mnist.h5\",\n",
" \"score\" : score_,\n",
" \"maxiter\" : maxiter,\n",
" \"c0\" : c0,\n",
" \"c1\" : c1,\n",
" \"w\" : w \n",
"}\n",
"with open(f'./model/{day}_{score}_mnist.json', 'a') as f:\n",
" json.dump(json_save, f)\n",
" f.write(',\\n')\n",
"\n", "\n",
" day = date.today().strftime(\"%Y-%m-%d\")\n",
" \n",
" os.makedirs(f'./model', exist_ok=True)\n",
" model.save(f'./model/{day}_{score}_mnist.h5')\n",
" json_save = {\n",
" \"name\" : f\"{day}_{score}_mnist.h5\",\n",
" \"score\" : score_,\n",
" \"maxiter\" : maxiter,\n",
" \"c0\" : c0,\n",
" \"c1\" : c1,\n",
" \"w\" : w \n",
" }\n",
" with open(f'./model/{day}_{score}_mnist.json', 'a') as f:\n",
" json.dump(json_save, f)\n",
" f.write(',\\n')\n",
" \n",
" return model\n",
"\n", "\n",
"# auto_tuning(n_particles=30, maxiter=1000, c0=0.5, c1=1.5, w=0.75)\n" "# auto_tuning(n_particles=30, maxiter=1000, c0=0.5, c1=1.5, w=0.75)\n"
] ]
}, },
{
"cell_type": "code",
"execution_count": null,
"id": "1af7569b",
"metadata": {},
"outputs": [],
"source": [
"loss_, acc_ = pso_m.all_history()\n",
"\n",
"plt.subplot(2,1,1)\n",
"for layer in all_loss:\n",
" plt.plot(layer)\n",
"plt.title('loss history')\n",
"\n",
"plt.subplot(2,1,2)\n",
"for layer in all_acc:\n",
" plt.plot(layer)\n",
"plt.title('acc history')"
]
},
{ {
"cell_type": "code", "cell_type": "code",
"execution_count": 3, "execution_count": 3,
@@ -903,24 +939,24 @@
 ],
 "source": [
 "# print(f\"정답 > {y_test}\")\n",
-"def get_score(model):\n",
-" x_train, y_train, x_test, y_test = get_data()\n",
-" \n",
-" predicted_result = model.predict(x_test)\n",
-" predicted_labels = np.argmax(predicted_result, axis=1)\n",
-" not_correct = []\n",
-" for i in tqdm(range(len(y_test)), desc=\"진행도\"):\n",
-" if predicted_labels[i] != y_test[i]:\n",
-" not_correct.append(i)\n",
-" # print(f\"추론 > {predicted_labels[i]} | 정답 > {y_test[i]}\")\n",
-" \n",
-" print(f\"틀린 갯수 > {len(not_correct)}/{len(y_test)}\")\n",
 "\n",
-" for i in range(3):\n",
-" plt.imshow(x_test[not_correct[i]].reshape(28,28), cmap='Greys')\n",
-" plt.show() \n",
-" \n",
-"get_score(auto_tuning(n_particles=30, maxiter=1000, c0=0.5, c1=1.5, w=0.75))"
+"x_train, y_train, x_test, y_test = get_data()\n",
+"\n",
+"predicted_result = model.predict(x_test)\n",
+"predicted_labels = np.argmax(predicted_result, axis=1)\n",
+"not_correct = []\n",
+"for i in tqdm(range(len(y_test)), desc=\"진행도\"):\n",
+" if predicted_labels[i] != y_test[i]:\n",
+" not_correct.append(i)\n",
+" # print(f\"추론 > {predicted_labels[i]} | 정답 > {y_test[i]}\")\n",
+" \n",
+"print(f\"틀린 갯수 > {len(not_correct)}/{len(y_test)}\")\n",
+"\n",
+"\n",
+"for i in range(3):\n",
+" plt.imshow(x_test[not_correct[i]].reshape(28,28), cmap='Greys')\n",
+"plt.show() \n",
+"\n"
 ]
 },
 {
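The new notebook cell above plots the per-particle loss and accuracy curves collected during optimization. A minimal standalone sketch of the same idea, assuming pso_m is the fitted PSO instance from the cell before it and that all_history() returns (loss_history, acc_history) as two lists of per-particle series, as the pso module diff below suggests:

import matplotlib.pyplot as plt

all_loss, all_acc = pso_m.all_history()  # assumed: two lists, one series per particle

fig, (ax_loss, ax_acc) = plt.subplots(2, 1, sharex=True)
for particle_loss in all_loss:           # one curve per particle
    ax_loss.plot(particle_loss, alpha=0.5)
ax_loss.set_title('loss history')
ax_loss.set_ylabel('loss')

for particle_acc in all_acc:
    ax_acc.plot(particle_acc, alpha=0.5)
ax_acc.set_title('acc history')
ax_acc.set_ylabel('accuracy')
ax_acc.set_xlabel('iteration')

plt.tight_layout()
plt.show()

Sharing the x-axis keeps both panels aligned on the same iteration index, which makes it easier to see when accuracy stalls while the loss is still moving.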

View File

@@ -77,7 +77,7 @@ def auto_tuning(n_particles=n_particles, maxiter=maxiter, c0=c0, c1=c1, w=w):
 optimizer = keras.optimizers.SGD(lr=lr, momentum=momentun, decay=decay, nesterov=nestrov)
-pso_m = PSO(model=model, loss_method=loss, n_particles=n_particles, x_train=x_train, y_train=y_train)
+pso_m = PSO(model=model, loss_method=loss, n_particles=n_particles)
 # c0 : 지역 최적값 중요도 (weight of the particle-best term)
 # c1 : 전역 최적값 중요도 (weight of the global-best term)
 # w : 관성 (현재 속도를 유지하는 정도) (inertia: how much of the current velocity is kept)
@@ -128,7 +128,7 @@ def get_score(model):
 # plt.imshow(x_test[not_correct[i]].reshape(28,28), cmap='Greys')
 # plt.show()
-get_score(auto_tuning(n_particles=30, maxiter=1000, c0=0.5, c1=1.5, w=0.75))
+get_score(auto_tuning(n_particles=30, maxiter=50, c0=0.5, c1=1.5, w=0.75))
 # %%
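The c0/c1/w comments above describe the standard particle swarm update that these hyperparameters control. For reference, a minimal sketch of the textbook velocity/position update (a sketch only; the repository's pso module may differ in details):

import numpy as np

def pso_step(position, velocity, p_best, g_best, w=0.8, c0=0.6, c1=1.6, rng=None):
    """One textbook PSO update for a single particle (all array arguments share a shape)."""
    rng = np.random.default_rng() if rng is None else rng
    r0 = rng.random(position.shape)   # random weights in [0, 1)
    r1 = rng.random(position.shape)
    velocity = (w * velocity                      # inertia: keep part of the current velocity
                + c0 * r0 * (p_best - position)   # pull toward this particle's own best
                + c1 * r1 * (g_best - position))  # pull toward the swarm's global best
    return position + velocity, velocity

With w=0.8 and c1 larger than c0, as in the notebook, particles keep most of their momentum but are drawn more strongly toward the global best than toward their own best.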

View File

@@ -9,7 +9,7 @@ class PSO(object):
 Class implementing PSO algorithm
 """
-def __init__(self, model: keras.models, x_train, y_train, loss_method=keras.losses.MeanSquaredError(), n_particles=5):
+def __init__(self, model: keras.models, loss_method=keras.losses.MeanSquaredError(), n_particles=5):
 """
 Initialize the key variables.
@@ -31,11 +31,9 @@ class PSO(object):
 m = keras.models.model_from_json(self.model_structure)
 m.compile(loss=self.loss_method,
 optimizer="adam", metrics=["accuracy"])
-# m.fit(x_train, y_train, epochs=1, batch_size=32, verbose=0) # 결과가 너무 좋지 않아서 처음 초기화 할때 어느정도 위치를 수정 (results were too poor, so nudge the positions a bit at initialization)
 self.particles_weights[_] = m.get_weights()
 # print(f"shape > {self.particles_weights[_][0]}")
 # self.particles_weights.append(particle_node)
 # print(f"particles_weights > {self.particles_weights}")
@@ -78,7 +76,8 @@ class PSO(object):
 n_particles)] # 각 파티클의 최적값의 점수 (score of each particle's best)
 self.g_best_score = 0 # 전역 최적값의 점수(초기화 - 무한대) (score of the global best; init: -inf)
 self.g_history = []
-self.all_cost_history = [[] for i in range(n_particles)]
+self.loss_history = [[] for i in range(n_particles)]
+self.acc_history = [[] for i in range(n_particles)]
 self.g_best_score_history = []
 self.history = []
@@ -224,12 +223,13 @@ class PSO(object):
 if score[1] > self.g_best_score:
 self.g_best_score = score[1]
 self.g_best = self.particles_weights[i].copy()
-self.g_history.append(self.g_best)
+self.g_history.append(self.g_best.copy())
 self.g_best_score_history.append(
 self.g_best_score)
 self.score = score
-self.all_cost_history[i].append(score)
+self.loss_history[i].append(score[0])
+self.acc_history[i].append(score[1])
 # if self.func(self.particles_weights[i]) < self.func(p_best):
 # self.p_best[i] = self.particles_weights[i]
 # if self.
@@ -240,7 +240,7 @@ class PSO(object):
 # self.g_history.append(self.g_best)
 # print(f"{i} particle score : {score[0]}")
 print(
-f"loss avg : {self.score[0]/self.n_particles} | acc avg : {self.score[1]/self.n_particles} | best loss : {self.g_best_score}")
+f"loss avg : {self.score[0]/self.n_particles} | acc avg : {self.score[1]/self.n_particles} | best score : {self.g_best_score}")
 # self.history.append(self.particles_weights.copy())
@@ -279,5 +279,5 @@ class PSO(object):
 def global_score_history(self):
 return self.g_best_score_history.copy()
-def all_cost(self):
-return self.all_cost_history.copy()
+def all_history(self):
+return self.loss_history, self.acc_history.copy()
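To summarize the bookkeeping this commit introduces: the single all_cost_history list is split into loss_history and acc_history, one list per particle, and all_history() now returns them as a tuple. A minimal sketch of that pattern (record is a hypothetical helper, not part of the repository):

class HistorySketch:
    def __init__(self, n_particles=5):
        # one loss series and one accuracy series per particle
        self.loss_history = [[] for _ in range(n_particles)]
        self.acc_history = [[] for _ in range(n_particles)]

    def record(self, i, score):
        # score is (loss, accuracy), e.g. the return value of keras Model.evaluate
        self.loss_history[i].append(score[0])
        self.acc_history[i].append(score[1])

    def all_history(self):
        # mirrors the diff: only acc_history is copied, so callers that plan to
        # mutate the result may want to copy loss_history themselves
        return self.loss_history, self.acc_history.copy()

Callers unpack the result as two values, e.g. all_loss, all_acc = pso_m.all_history(), which is what the new plotting cell in the notebook does.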

402
xor.ipynb

File diff suppressed because one or more lines are too long