Mirror of https://github.com/jung-geun/PSO.git
23-06-28
Simple update
history_plt/iris_0624_1.png (new binary file, 292 KiB, not shown)
iris.py (12 lines changed)
@@ -1,22 +1,24 @@
 import os
 
 os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
 
 import tensorflow as tf
 
 tf.random.set_seed(777)  # for reproducibility
 
 import numpy as np
 
 np.random.seed(777)
 
+import gc
 
+from pso import Optimizer
 from sklearn.datasets import load_iris
 from sklearn.model_selection import train_test_split
 
 from tensorflow import keras
 from tensorflow.keras import layers
 from tensorflow.keras.models import Sequential
 
-from pso import Optimizer
 
-import gc
 
 def make_model():
     model = Sequential()
@@ -45,7 +47,7 @@ loss = ['categorical_crossentropy']
 pso_iris = Optimizer(
     model,
     loss=loss[0],
-    n_particles=75,
+    n_particles=100,
     c0=0.4,
     c1=0.8,
     w_min=0.7,
mnist.py (19 lines changed)
@@ -59,8 +59,6 @@ def make_model():
 
     return model
 
-# %%
-
 # %%
 model = make_model()
 x_test, y_test = get_data_test()
@@ -73,12 +71,12 @@ if __name__ == "__main__":
             model,
             loss=loss[0],
             n_particles=75,
-            c0=0.35,
-            c1=0.8,
-            w_min=0.7,
-            w_max=1.15,
+            c0=0.3,
+            c1=0.7,
+            w_min=0.6,
+            w_max=0.9,
             negative_swarm=0.25,
-            momentun_swarm=0.25,
+            momentun_swarm=0,
         )
 
         best_score = pso_mnist.fit(
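For context on what these values tune: in the standard particle swarm formulation, a cognitive coefficient scales the pull toward a particle's own best position, a social coefficient scales the pull toward the global best, and an inertia weight (often decayed between a maximum and a minimum over the run) scales the previous velocity. Whether c0 and c1 map to cognitive and social in exactly that order here is an assumption; the sketch below shows only the textbook update, not this repository's implementation.

```python
import numpy as np

def pso_velocity_update(x, v, p_best, g_best, w, c0=0.3, c1=0.7):
    """Textbook PSO update for one particle (illustrative only).

    w  - inertia weight applied to the previous velocity
    c0 - assumed cognitive coefficient (pull toward the particle's best)
    c1 - assumed social coefficient (pull toward the global best)
    """
    r0, r1 = np.random.rand(2)
    v_new = w * v + c0 * r0 * (p_best - x) + c1 * r1 * (g_best - x)
    return x + v_new, v_new

def inertia_weight(epoch, epochs, w_min=0.6, w_max=0.9):
    # One common schedule: decay linearly from w_max to w_min over the run.
    return w_max - (w_max - w_min) * epoch / max(epochs - 1, 1)
```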
@@ -87,13 +85,12 @@ if __name__ == "__main__":
             epochs=200,
             save=True,
             save_path="./result/mnist",
-            renewal="loss",
+            renewal="acc",
             empirical_balance=False,
             Dispersion=False,
             check_point=25
         )
     except Exception as e:
         print(e)
-        # pso_mnist.model_save("./result/mnist")
-        # pso_mnist.save_info("./result/mnist")
+    finally:
+        gc.collect()
@@ -12,8 +12,6 @@ from tqdm import tqdm
 
 # import cupy as cp
 
-
-
 gpus = tf.config.experimental.list_physical_devices("GPU")
 if gpus:
     try:
@@ -37,6 +35,8 @@ class Optimizer:
         momentun_swarm: float = 0,
     ):
         """
+        particle swarm optimization
+
         Args:
             model (keras.models): model structure
             loss (str): loss function
@@ -67,7 +67,8 @@ class Optimizer:
             m = keras.models.model_from_json(model.to_json())
             init_weights = m.get_weights()
             w_, sh_, len_ = self._encode(init_weights)
-            w_ = np.random.uniform(-1.5, 1.5, len(w_))
+            w_ = np.random.rand(len(w_)) * 4 - 2
+            # w_ = np.random.uniform(-1.5, 1.5, len(w_))
             m.set_weights(self._decode(w_, sh_, len_))
             m.compile(loss=self.loss, optimizer="sgd", metrics=["accuracy"])
             self.particles[i] = Particle(m, loss, negative=True if i < negative_swarm * self.n_particles else False, momentun=True if i > self.n_particles * (1 - self.momentun_swarm) else False)
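The new initialization widens the sampling range rather than changing the distribution: `np.random.rand(n)` is uniform on [0, 1), so `np.random.rand(n) * 4 - 2` is uniform on [-2, 2), compared with the previous uniform draw on [-1.5, 1.5). A quick illustrative check:

```python
import numpy as np

n = 10_000
old_init = np.random.uniform(-1.5, 1.5, n)  # previous range
new_init = np.random.rand(n) * 4 - 2        # new range, uniform on [-2, 2)
print(old_init.min(), old_init.max())  # approx -1.5 .. 1.5
print(new_init.min(), new_init.max())  # approx -2.0 .. 2.0
```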
@@ -309,17 +310,23 @@ class Optimizer:
 
             if renewal == "acc":
                 if score[1] >= self.g_best_score[0]:
-                    self.g_best_score[0] = score[1]
-                    if score[0] <= self.g_best_score[1]:
-                        self.g_best_score[1] = score[0]
-                    self.g_best = self.particles[i].get_best_weights()
+                    if score[1] > self.g_best_score[0]:
+                        self.g_best_score[0] = score[1]
+                        self.g_best = self.particles[i].get_best_weights()
+                    else:
+                        if score[0] < self.g_best_score[1]:
+                            self.g_best_score[1] = score[0]
+                            self.g_best = self.particles[i].get_best_weights()
                 epochs_pbar.set_description(f"best {self.g_best_score[0]:.4f} | {self.g_best_score[1]:.4f}")
             elif renewal == "loss":
                 if score[0] <= self.g_best_score[1]:
-                    self.g_best_score[1] = score[0]
-                    if score[1] >= self.g_best_score[0]:
-                        self.g_best_score[0] = score[1]
-                    self.g_best = self.particles[i].get_best_weights()
+                    if score[0] < self.g_best_score[1]:
+                        self.g_best_score[1] = score[0]
+                        self.g_best = self.particles[i].get_best_weights()
+                    else:
+                        if score[1] > self.g_best_score[0]:
+                            self.g_best_score[0] = score[1]
+                            self.g_best = self.particles[i].get_best_weights()
                 epochs_pbar.set_description(f"best {self.g_best_score[0]:.4f} | {self.g_best_score[1]:.4f}")
 
             if score[0] == None:
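The reworked branches turn the renewal check into a strict-improvement rule with a tie-break on the secondary metric: with renewal="acc" a strictly higher accuracy replaces the global best, and an equal accuracy only wins if its loss is lower; renewal="loss" mirrors this. A standalone sketch of that decision (a hypothetical helper, not code from the repository):

```python
def should_renew(best_acc, best_loss, acc, loss, renewal="acc"):
    """Return True when (acc, loss) should replace the current global best.

    The primary metric must improve strictly; on a tie, the secondary
    metric breaks it. Mirrors the restructured logic above.
    """
    if renewal == "acc":
        return acc > best_acc or (acc == best_acc and loss < best_loss)
    if renewal == "loss":
        return loss < best_loss or (loss == best_loss and acc > best_acc)
    return False
```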
@@ -420,6 +427,7 @@ class Optimizer:
             "empirical_balance": self.empirical_balance,
             "Dispersion": self.Dispersion,
             "negative_swarm": self.negative_swarm,
+            "momentun_swarm": self.momentun_swarm,
             "renewal": self.renewal,
         }
 
@@ -114,6 +114,8 @@ class Particle:
                 self.best_score = score[1]
                 self.best_weights = self.model.get_weights()
         elif renewal == "loss":
+            if score[0] == 'nan':
+                score[0] = np.inf
             if score[0] < self.best_score:
                 self.best_score = score[0]
                 self.best_weights = self.model.get_weights()
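One caveat on the added guard: Keras returns the loss as a float, and a float NaN never compares equal to the string 'nan', so the new branch will not trigger on an actual NaN loss. A numeric check is the usual guard; a minimal sketch (an observation about the pattern, not code from this commit):

```python
import math

def sanitize_loss(loss):
    # NaN is not equal to anything, including itself, so test numerically.
    if loss is None or math.isnan(loss):
        return math.inf
    return loss
```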
@@ -147,7 +147,7 @@ best_score = pso_iris.fit(
 ```
 
 With the parameters above, accuracy reached 94% at generation 2, 96% at generation 7, and 99.16% at generation 106.
-
+
 
 3. The mnist problem
 
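Pieced together from the hunks above, the iris example reads roughly as follows. The make_model() body, the train/test handling, and the exact fit() signature are not shown in this commit, so those parts are assumptions; the Optimizer arguments are the ones from iris.py and the fit keywords are borrowed from mnist.py.

```python
from sklearn.datasets import load_iris
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential

from pso import Optimizer

def make_model():
    # Stand-in classifier for the 4 iris features; the real make_model()
    # in iris.py is not part of this diff.
    return Sequential([
        layers.Dense(16, activation="relu", input_shape=(4,)),
        layers.Dense(3, activation="softmax"),
    ])

x, y = load_iris(return_X_y=True)
y = keras.utils.to_categorical(y, 3)
x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=777)

pso_iris = Optimizer(
    make_model(),
    loss="categorical_crossentropy",
    n_particles=100,  # raised from 75 in this commit
    c0=0.4,
    c1=0.8,
    w_min=0.7,
)
best_score = pso_iris.fit(
    x_train, y_train,  # assumed positional data arguments
    epochs=200,
    save=True,
    save_path="./result/iris",
    renewal="acc",
)
```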