Drive a fraction of all particles in the direction opposite to their current velocity, as a countermeasure against premature convergence when the swarm gets trapped in a local optimum (a short illustrative sketch follows the commit metadata below).
jung-geun
2023-05-31 02:52:32 +09:00
parent c5731c6870
commit 8012cf3557
7 changed files with 171 additions and 121 deletions
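
A minimal sketch of the idea, outside the project's actual classes (the names pso_step, positions, velocities and reverse_fraction are illustrative only, not this repository's API): a fixed fraction of the swarm applies its velocity with the sign flipped, so those particles back away from the region the rest of the swarm is converging on.

import numpy as np

def pso_step(positions, velocities, reverse_fraction=0.3):
    # One plain position update: most particles follow their velocity,
    # but the first `reverse_fraction` of the swarm moves against it.
    n = len(positions)
    n_reversed = int(reverse_fraction * n)      # e.g. 0.3 * 50 = 15 particles
    signs = np.ones(n)
    signs[:n_reversed] = -1.0                   # reversed ("random") particles
    return positions + signs[:, None] * velocities

# toy usage: 50 particles in a 10-dimensional search space
pos = np.random.uniform(-3, 3, size=(50, 10))
vel = np.random.uniform(-0.1, 0.1, size=(50, 10))
pos = pso_step(pos, vel, reverse_fraction=0.3)

In the commit itself this corresponds to the new random argument of Optimizer, the Particle(m, loss, random=True) branch, and the sign flip in Particle._update_weights shown at the bottom of this page.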

View File

@@ -39,7 +39,7 @@ x_train, x_test, y_train, y_test = load_data()
 loss = 'categorical_crossentropy'
-pso_iris = Optimizer(model, loss=loss, n_particles=50, c0=0.5, c1=0.8, w_min=0.7, w_max=1.3)
+pso_iris = Optimizer(model, loss=loss, n_particles=50, c0=0.5, c1=0.8, w_min=0.75, w_max=1.3)
 weight, score = pso_iris.fit(
     x_train, y_train, epochs=500, save=True, save_path="./result/iris", renewal="acc", empirical_balance=False, Dispersion=False, check_point=50)

BIN  iris_50_99.png (new file, 177 KiB; binary file not shown)

BIN  iris_99.16.png (new file, 143 KiB; binary file not shown)

View File

@@ -23,9 +23,9 @@ from tqdm import tqdm
 import gc
-print(tf.__version__)
-print(tf.config.list_physical_devices())
-print(f"Num GPUs Available: {len(tf.config.list_physical_devices('GPU'))}")
+# print(tf.__version__)
+# print(tf.config.list_physical_devices())
+# print(f"Num GPUs Available: {len(tf.config.list_physical_devices('GPU'))}")
 def get_data():
@@ -79,9 +79,9 @@ loss = 'huber_loss'
 # loss = 'mean_squared_error'
-pso_mnist = Optimizer(model, loss=loss, n_particles=50, c0=0.5, c1=0.8, w_min=0.75, w_max=1.3)
+pso_mnist = Optimizer(model, loss=loss, n_particles=50, c0=0.4, c1=0.8, w_min=0.7, w_max=1.2, random=0.3)
 weight, score = pso_mnist.fit(
-    x_test, y_test, epochs=1000, save=True, save_path="./result/mnist", renewal="acc", empirical_balance=False, Dispersion=True)
+    x_test, y_test, epochs=200, save=True, save_path="./result/mnist", renewal="acc", empirical_balance=False, Dispersion=False, check_point=10)
 pso_mnist.model_save("./result/mnist")
 pso_mnist.save_info("./result/mnist")

File diff suppressed because one or more lines are too long

View File

@@ -1,4 +1,5 @@
 import os
+import sys
 import tensorflow as tf
 from tensorflow import keras
@@ -15,7 +16,19 @@ import gc
 from pso.particle import Particle

 class Optimizer:
+    """
+    Args:
+        model (keras.models): model architecture
+        loss (str): loss function
+        n_particles (int): number of particles
+        c0 (float): local rate - weight toward the local best value
+        c1 (float): global rate - weight toward the global best value
+        w_min (float): minimum inertia weight
+        w_max (float): maximum inertia weight
+        random (float): fraction of random particles - a value between 0 and 1
+    """
     def __init__(
         self,
         model: keras.models,
@@ -25,6 +38,7 @@ class Optimizer:
         c1=1.5,
         w_min=0.5,
         w_max=1.5,
+        random: float = 0,
     ):
         self.model = model  # model architecture
         self.loss = loss  # loss function
@@ -38,23 +52,28 @@ class Optimizer:
         self.g_best_score = 0  # best score - initialized to 0 at the start
         self.g_best = None  # weights that achieved the best score
         self.g_best_ = None  # weights that achieved the best score - variable used for dispersing the values
+        self.avg_score = 0  # average score

         for i in tqdm(range(self.n_particles), desc="Initializing Particles"):
             m = keras.models.model_from_json(model.to_json())
             init_weights = m.get_weights()
             w_, sh_, len_ = self._encode(init_weights)
-            w_ = np.random.uniform(-0.1, 0.1, len(w_))
+            w_ = np.random.uniform(-3, 3, len(w_))
             m.set_weights(self._decode(w_, sh_, len_))
             m.compile(loss=self.loss, optimizer="sgd", metrics=["accuracy"])
-            self.particles[i] = Particle(m, loss)
+            if i < random * self.n_particles:
+                self.particles[i] = Particle(m, loss, random=True)
+            else:
+                self.particles[i] = Particle(m, loss, random=False)

     """
+    Args:
+        weights (list) : weights of the keras model
     Returns:
-        (cupy array) : weights - flattened to 1-D and returned
+        (numpy array) : weights - flattened to 1-D and returned
         (list) : original shape of the weights
         (list) : lengths of the original weight shapes
     """
     def _encode(self, weights):
         # w_gpu = cp.array([])
         w_gpu = np.array([])
@@ -70,6 +89,10 @@ class Optimizer:
         return w_gpu, shape, lenght

     """
+    Args:
+        weight (numpy array) : weights - flattened to 1-D
+        shape (list) : original shape of the weights
+        lenght (list) : lengths of the original weight shapes
     Returns:
         (list) : weights restored to their original shape
     """
@@ -102,27 +125,16 @@ class Optimizer:
         return 1 + np.abs(score)

     """
-    parameters
-    ----------
-    x : numpy.ndarray
-    y : numpy.ndarray
-    epochs : int
-    save : bool
-    save_path : str ex) "./result"
-    renewal : str ex) "acc" or "loss"
-    """
-
-    """
-    parameters
+    Args:
     fit(
         x_test : numpy.ndarray,
         y_test : numpy.ndarray,
         epochs : int,
         save : bool - True : save, False : not save
         save_path : str ex) "./result",
         renewal : str ex) "acc" or "loss",
-        empirical_balance : bool - True : empirical balance, False : no balance
-        Dispersion : bool - True : random search, False : PSO
+        empirical_balance : bool - True :
+        Dispersion : bool - True : disperse the g_best values to search for the global optimum, False : use only the g_best values
+        check_point : int - interval (in epochs) at which to save - None : do not save
     """
     def fit(
         self,
@@ -136,6 +148,8 @@ class Optimizer:
         Dispersion: bool = False,
         check_point: int = None,
     ):
+        self.save_path = save_path
         self.renewal = renewal
         if renewal == "acc":
             self.g_best_score = 0
@@ -150,7 +164,9 @@ class Optimizer:
             os.makedirs(save_path, exist_ok=True)
         self.day = datetime.now().strftime("%m-%d-%H-%M")

-        for i, p in enumerate(self.particles):
+        # for i, p in enumerate(self.particles):
+        for i in tqdm(range(self.n_particles), desc="Initializing Particles"):
+            p = self.particles[i]
             local_score = p.get_score(x, y, renewal=renewal)

             if renewal == "acc":
@@ -166,103 +182,131 @@ class Optimizer:
print(f"initial g_best_score : {self.g_best_score}") print(f"initial g_best_score : {self.g_best_score}")
for _ in range(epochs): try:
acc = 0 for _ in range(epochs):
loss = 0 print(f"epoch {_ + 1}/{epochs}")
min_score = np.inf acc = 0
max_score = 0 loss = 0
min_loss = np.inf min_score = np.inf
max_loss = 0 max_score = 0
min_loss = np.inf
max_loss = 0
# for i in tqdm(range(len(self.particles)), desc=f"epoch {_ + 1}/{epochs}", ascii=True): # for i in tqdm(range(len(self.particles)), desc=f"epoch {_ + 1}/{epochs}", ascii=True):
for i in range(len(self.particles)): for i in range(len(self.particles)):
w = self.w_min + (self.w_max - self.w_min) * _ / epochs w = self.w_max - (self.w_max - self.w_min) * _ / epochs
if Dispersion: if Dispersion:
g_best = self.g_best_ g_best = self.g_best_
else: else:
g_best = self.g_best g_best = self.g_best
if empirical_balance: if empirical_balance:
if np.random.rand() < np.exp(-(_) / epochs): if np.random.rand() < np.exp(-(_) / epochs):
w_p_ = self.f(x, y, self.particles[i].get_best_weights()) w_p_ = self.f(x, y, self.particles[i].get_best_weights())
w_g_ = self.f(x, y, self.g_best) w_g_ = self.f(x, y, self.g_best)
w_p = w_p_ / (w_p_ + w_g_) w_p = w_p_ / (w_p_ + w_g_)
w_g = w_p_ / (w_p_ + w_g_) w_g = w_p_ / (w_p_ + w_g_)
del w_p_
del w_g_
else:
p_b = self.particles[i].get_best_score()
g_a = self.avg_score
l_b = p_b - g_a
l_b = np.sqrt(np.power(l_b, 2))
p_ = 1 / (self.n_particles * np.linalg.norm(self.c1 - self.c0)) * l_b
p_ = np.exp(-1 * p_)
w_p = p_
w_g = 1 - p_
score = self.particles[i].step_w(
x, y, self.c0, self.c1, w, g_best, w_p, w_g, renewal=renewal
)
else: else:
p = 1 / (self.n_particles * np.linalg.norm(self.c1 - self.c0)) score = self.particles[i].step(
p = np.exp(-p) x, y, self.c0, self.c1, w, g_best, renewal=renewal
w_p = p )
w_g = 1 - p
score = self.particles[i].step_w( if renewal == "acc":
x, y, self.c0, self.c1, w, g_best, w_p, w_g, renewal=renewal if score[1] >= self.g_best_score:
) self.g_best_score = score[1]
self.g_best = self.particles[i].get_best_weights()
elif renewal == "loss":
if score[0] <= self.g_best_score:
self.g_best_score = score[0]
self.g_best = self.particles[i].get_best_weights()
else: loss += score[0]
score = self.particles[i].step( acc += score[1]
x, y, self.c0, self.c1, w, g_best, renewal=renewal if score[0] < min_loss:
) min_loss = score[0]
if score[0] > max_loss:
max_loss = score[0]
if renewal == "acc": if score[1] < min_score:
if score[1] >= self.g_best_score: min_score = score[1]
self.g_best_score = score[1] if score[1] > max_score:
self.g_best = self.particles[i].get_best_weights() max_score = score[1]
elif renewal == "loss":
if score[0] <= self.g_best_score:
self.g_best_score = score[0]
self.g_best = self.particles[i].get_best_weights()
loss += score[0] gc.collect()
acc += score[1]
if score[0] < min_loss:
min_loss = score[0]
if score[0] > max_loss:
max_loss = score[0]
if score[1] < min_score: if save:
min_score = score[1] with open(
if score[1] > max_score: f"./{save_path}/{self.day}_{self.n_particles}_{epochs}_{self.c0}_{self.c1}_{self.w_min}_{renewal}.csv",
max_score = score[1] "a",
) as f:
f.write(f"{score[0]}, {score[1]}")
if i != self.n_particles - 1:
f.write(", ")
TS = self.c0 + np.random.rand() * (self.c1 - self.c0)
g_, g_sh, g_len = self._encode(self.g_best)
decrement = (epochs - (_) + 1) / epochs
g_ = (1 - decrement) * g_ + decrement * TS
self.g_best_ = self._decode(g_, g_sh, g_len)
if save: if save:
with open( with open(
f"./{save_path}/{self.day}_{self.n_particles}_{epochs}_{self.c0}_{self.c1}_{self.w_min}_{renewal}.csv", f"./{save_path}/{self.day}_{self.n_particles}_{epochs}_{self.c0}_{self.c1}_{self.w_min}_{renewal}.csv",
"a", "a",
) as f: ) as f:
f.write(f"{score[0]}, {score[1]}") f.write("\n")
if i != self.n_particles - 1:
f.write(", ")
TS = self.c0 + np.random.rand() * (self.c1 - self.c0) # print(f"loss min : {min_loss} | loss max : {max_loss} | acc min : {min_score} | acc max : {max_score}")
g_, g_sh, g_len = self._encode(self.g_best) # print(f"loss avg : {loss/self.n_particles} | acc avg : {acc/self.n_particles} | Best {renewal} : {self.g_best_score}")
decrement = (epochs - (_) + 1) / epochs print(
g_ = (1 - decrement) * g_ + decrement * TS f"loss min : {min_loss} | acc max : {max_score} | Best {renewal} : {self.g_best_score}"
self.g_best_ = self._decode(g_, g_sh, g_len) )
if save: gc.collect()
with open(
f"./{save_path}/{self.day}_{self.n_particles}_{epochs}_{self.c0}_{self.c1}_{self.w_min}_{renewal}.csv",
"a",
) as f:
f.write("\n")
print(f"epoch {_ + 1}/{epochs} finished") if check_point is not None:
# print(f"loss min : {min_loss} | loss max : {max_loss} | acc min : {min_score} | acc max : {max_score}") if _ % check_point == 0:
# print(f"loss avg : {loss/self.n_particles} | acc avg : {acc/self.n_particles} | Best {renewal} : {self.g_best_score}") os.makedirs(f"./{save_path}/{self.day}", exist_ok=True)
print( self._check_point_save(f"./{save_path}/{self.day}/ckpt-{_}")
f"loss min : {min_loss} | acc avg : {max_score} | Best {renewal} : {self.g_best_score}" self.avg_score = acc/self.n_particles
) except KeyboardInterrupt:
print("Keyboard Interrupt")
self.model_save(save_path)
print("model saved")
self.save_info(save_path)
print("info saved")
sys.exit(0)
except MemoryError:
print("Memory Error")
self.model_save(save_path)
print("model save")
self.save_info(save_path)
print("save info")
sys.exit(1)
except Exception as e:
print(e)
finally:
return self.g_best, self.g_best_score
gc.collect()
if check_point is not None:
if _ % check_point == 0:
os.makedirs(f"./{save_path}/{self.day}", exist_ok=True)
self._check_point_save(f"./{save_path}/{self.day}/check_point_{_}.h5")
return self.g_best, self.g_best_score
def get_best_model(self): def get_best_model(self):
model = keras.models.model_from_json(self.model.to_json()) model = keras.models.model_from_json(self.model.to_json())
@@ -290,16 +334,17 @@ class Optimizer:
         }

         with open(
-            f"./{path}/{self.day}_{self.loss}_{self.n_particles}_{self.g_best_score}.json",
-            "w",
+            f"./{path}/{self.day}/{self.loss}_{self.n_particles}.json",
+            "a",
         ) as f:
             json.dump(json_save, f, indent=4)
+            f.write(",\n")

     def _check_point_save(self, save_path: str = f"./result/check_point"):
         model = self.get_best_model()
-        model.save(save_path)
+        model.save_weights(save_path)

-    def model_save(self, save_path: str = "./result/model"):
+    def model_save(self, save_path: str = "./result"):
         model = self.get_best_model()
         model.save(
             f"./{save_path}/{self.day}/{self.n_particles}_{self.c0}_{self.c1}_{self.w_min}.h5"

View File

@@ -6,14 +6,14 @@ from tensorflow import keras
 import numpy as np

 class Particle:
-    def __init__(self, model:keras.models, loss):
+    def __init__(self, model:keras.models, loss, random:bool = False):
         self.model = model
         self.loss = loss
         self.init_weights = self.model.get_weights()

         i_w_,s_,l_ = self._encode(self.init_weights)
         i_w_ = np.random.rand(len(i_w_)) / 5 - 0.10
         self.velocities = self._decode(i_w_,s_,l_)
+        self.random = random

         self.best_score = 0
         self.best_weights = self.init_weights
@@ -94,6 +94,8 @@ class Particle:
     def _update_weights(self):
         encode_w, w_sh, w_len = self._encode(weights = self.model.get_weights())
         encode_v, _, _ = self._encode(weights = self.velocities)
+        if self.random:
+            encode_v = -1 * encode_v
         new_w = encode_w + encode_v
         self.model.set_weights(self._decode(new_w, w_sh, w_len))