Mirror of https://github.com/jung-geun/PSO.git (synced 2025-12-19 20:44:39 +09:00)
23-07-27
Adjust momentary weight-change decay factor 0.6 > 0.75 + evaluate multiprocessing false
mnist.py (9 changed lines)
@@ -68,7 +68,7 @@ def make_model():
 def random_state():
     with open(
-        "result/mnist/20230720-192726/mean_squared_error_[0.4970000088214874, 0.10073449462652206].json",
+        "result/mnist/20230723-061626/mean_squared_error_[0.6384999752044678, 0.0723000094294548].json",
         "r",
     ) as f:
         json_ = json.load(f)
@@ -101,7 +101,7 @@ loss = [
     "mean_absolute_percentage_error",
 ]
 
-# rs = random_state()
+rs = random_state()
 
 pso_mnist = Optimizer(
     model,
@@ -110,17 +110,18 @@ pso_mnist = Optimizer(
     c0=0.3,
     c1=0.5,
     w_min=0.4,
-    w_max=0.9,
+    w_max=0.7,
     negative_swarm=0.1,
     mutation_swarm=0.3,
     particle_min=-4,
     particle_max=4,
+    random_state=rs,
 )
 
 best_score = pso_mnist.fit(
     x_train,
     y_train,
-    epochs=200,
+    epochs=250,
     save_info=True,
     log=2,
     log_name="mnist",
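Note: taken together, the mnist.py changes above resume training from a previously saved state instead of a fresh start, lower w_max to 0.7, and train for 250 epochs. A minimal sketch of the resulting call site, assuming Optimizer, model, x_train, and y_train are defined as elsewhere in mnist.py and that the saved JSON exposes its state under a "random_state" key (the key name and helper are assumptions, not confirmed by the diff):

import json

def load_random_state(path):
    # Hypothetical helper mirroring random_state() in mnist.py; the JSON path is the
    # one referenced in the diff, the "random_state" key is assumed.
    with open(path, "r") as f:
        return json.load(f)["random_state"]

rs = load_random_state(
    "result/mnist/20230723-061626/mean_squared_error_[0.6384999752044678, 0.0723000094294548].json"
)

pso_mnist = Optimizer(               # Optimizer as imported in mnist.py
    model,
    c0=0.3, c1=0.5,
    w_min=0.4, w_max=0.7,            # w_max lowered from 0.9 in this commit
    negative_swarm=0.1, mutation_swarm=0.3,
    particle_min=-4, particle_max=4,
    random_state=rs,                 # resume from the saved state
)
best_score = pso_mnist.fit(x_train, y_train, epochs=250, save_info=True, log=2, log_name="mnist")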
@@ -88,39 +88,50 @@ class Optimizer:
 
         self.save_path = None  # save location
         self.renewal = "acc"
-        self.Dispersion = False
+        self.dispersion = False
         self.day = datetime.now().strftime("%Y%m%d-%H%M%S")
 
         self.empirical_balance = False
         negative_count = 0
 
-        print(f"start running time : {self.day}")
-        for i in tqdm(range(self.n_particles), desc="Initializing Particles"):
-            model_ = keras.models.model_from_json(model.to_json())
-            w_, sh_, len_ = self._encode(model_.get_weights())
-            w_ = np.random.uniform(particle_min, particle_max, len(w_))
-            model_.set_weights(self._decode(w_, sh_, len_))
+        self.train_summary_writer = [None] * self.n_particles
+        try:
+            print(f"start running time : {self.day}")
+            for i in tqdm(range(self.n_particles), desc="Initializing Particles"):
+                model_ = keras.models.model_from_json(model.to_json())
+                w_, sh_, len_ = self._encode(model_.get_weights())
+                w_ = np.random.uniform(particle_min, particle_max, len(w_))
+                model_.set_weights(self._decode(w_, sh_, len_))
 
-            model_.compile(loss=self.loss, optimizer="sgd", metrics=["accuracy"])
-            self.particles[i] = Particle(
-                model_,
-                loss,
-                negative=True if i < negative_swarm * self.n_particles else False,
-                mutation=mutation_swarm,
-            )
-            if i < negative_swarm * self.n_particles:
-                negative_count += 1
-            # del m, init_weights, w_, sh_, len_
-            gc.collect()
-            tf.keras.backend.reset_uids()
-            tf.keras.backend.clear_session()
-
-        print(f"negative swarm : {negative_count} / {self.n_particles}")
-        print(f"mutation swarm : {mutation_swarm * 100}%")
+                model_.compile(loss=self.loss, optimizer="sgd", metrics=["accuracy"])
+                self.particles[i] = Particle(
+                    model_,
+                    loss,
+                    negative=True if i < negative_swarm * self.n_particles else False,
+                    mutation=mutation_swarm,
+                )
+                if i < negative_swarm * self.n_particles:
+                    negative_count += 1
+                # del m, init_weights, w_, sh_, len_
+                gc.collect()
+                tf.keras.backend.reset_uids()
+                tf.keras.backend.clear_session()
+
+            print(f"negative swarm : {negative_count} / {self.n_particles}")
+            print(f"mutation swarm : {mutation_swarm * 100}%")
 
-        gc.collect()
-        tf.keras.backend.reset_uids()
-        tf.keras.backend.clear_session()
+            gc.collect()
+            tf.keras.backend.reset_uids()
+            tf.keras.backend.clear_session()
+        except KeyboardInterrupt:
+            print("Ctrl + C : Stop Training")
+            sys.exit(0)
+        except MemoryError:
+            print("Memory Error : Stop Training")
+            sys.exit(1)
+        except Exception as e:
+            print(e)
+            sys.exit(1)
 
     def __del__(self):
         del self.model
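The __init__ hunk above does two things: it allocates self.train_summary_writer before anything can fail, and it wraps the whole particle-initialization loop in try/except so that Ctrl+C, out-of-memory, and unexpected errors exit cleanly instead of leaving a half-built swarm. A stripped-down sketch of that pattern, with a hypothetical init_particle callable standing in for the per-particle Keras work shown in the diff:

import sys

def build_swarm(n_particles, init_particle):
    # init_particle(i) is a placeholder for the work done per particle above:
    # clone the model from JSON, randomize its weights, compile, wrap in Particle.
    particles = [None] * n_particles
    writers = [None] * n_particles   # allocated up front, as the commit now does
    try:
        for i in range(n_particles):
            particles[i] = init_particle(i)
    except KeyboardInterrupt:
        print("Ctrl + C : Stop Training")
        sys.exit(0)
    except MemoryError:
        print("Memory Error : Stop Training")
        sys.exit(1)
    except Exception as e:
        print(e)
        sys.exit(1)
    return particles, writers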
@@ -239,7 +250,7 @@ class Optimizer:
         """
         self.save_path = save_path
         self.empirical_balance = empirical_balance
-        self.Dispersion = dispersion
+        self.dispersion = dispersion
 
         self.renewal = renewal
         try:
@@ -248,7 +259,6 @@ class Optimizer:
             assert log_name is not None, "log_name is None"
 
             train_log_dir = f"logs/{log_name}/{self.day}/train"
-            self.train_summary_writer = [None] * self.n_particles
             for i in range(self.n_particles):
                 self.train_summary_writer[i] = tf.summary.create_file_writer(
                     train_log_dir + f"/{i}"
@@ -263,7 +273,8 @@ class Optimizer:
                 os.makedirs(save_path, exist_ok=True)
         except ValueError as e:
             print(e)
-            sys.exit(1)
+        except Exception as e:
+            print(e)
 
         for i in tqdm(range(self.n_particles), desc="Initializing velocity"):
             p = self.particles[i]
@@ -534,7 +545,7 @@ class Optimizer:
                 "w_max": self.w_max,
                 "loss_method": self.loss,
                 "empirical_balance": self.empirical_balance,
-                "Dispersion": self.Dispersion,
+                "dispersion": self.dispersion,
                 "negative_swarm": self.negative_swarm,
                 "mutation_swarm": self.mutation_swarm,
                 "random_state_0": self.random_state[0],
@@ -1,9 +1,6 @@
-import gc
-
 import numpy as np
 import tensorflow as tf
 from tensorflow import keras
 
-from tensorflow import keras
 
 class Particle:
     """
@@ -28,9 +25,9 @@ class Particle:
         self.model = model
         self.loss = loss
         init_weights = self.model.get_weights()
-        i_w_, s_, l_ = self._encode(init_weights)
+        i_w_, i_s, i_l = self._encode(init_weights)
         i_w_ = np.random.uniform(-0.5, 0.5, len(i_w_))
-        self.velocities = self._decode(i_w_, s_, l_)
+        self.velocities = self._decode(i_w_, i_s, i_l)
         self.negative = negative
         self.mutation = mutation
         self.best_score = 0
@@ -38,7 +35,7 @@ class Particle:
         self.before_best = init_weights
         self.before_w = 0
 
-        del i_w_, s_, l_
+        del i_w_, i_s, i_l
         del init_weights
 
     def __del__(self):
@@ -65,9 +62,9 @@ class Particle:
         shape = []
         for layer in weights:
             shape.append(layer.shape)
-            w_ = layer.reshape(-1)
-            length.append(len(w_))
-            w_gpu = np.append(w_gpu, w_)
+            w_tmp = layer.reshape(-1)
+            length.append(len(w_tmp))
+            w_gpu = np.append(w_gpu, w_tmp)
 
         return w_gpu, shape, length
 
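The _encode change above is only a rename (w_ becomes w_tmp, so the loop no longer reuses the name of the flattened vector), but for orientation, the flatten/restore round trip that _encode and its _decode counterpart implement looks roughly like the standalone sketch below. _decode is not part of this diff, so the inverse shown here is an assumption based on the shapes and lengths _encode returns:

import numpy as np

def encode(weights):
    # Flatten per-layer weight arrays into one 1-D vector, keeping each
    # layer's shape and flattened length so the vector can be rebuilt.
    w_gpu, shape, length = np.array([]), [], []
    for layer in weights:
        shape.append(layer.shape)
        w_tmp = layer.reshape(-1)
        length.append(len(w_tmp))
        w_gpu = np.append(w_gpu, w_tmp)
    return w_gpu, shape, length

def decode(w_gpu, shape, length):
    # Assumed inverse: slice the flat vector back into per-layer arrays.
    weights, start = [], 0
    for sh, ln in zip(shape, length):
        weights.append(w_gpu[start:start + ln].reshape(sh))
        start += ln
    return weights

layers = [np.ones((2, 3)), np.zeros(4)]
flat, sh, ln = encode(layers)
assert all((a == b).all() for a, b in zip(decode(flat, sh, ln), layers))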
@@ -109,7 +106,7 @@ class Particle:
         Returns:
             (float): score
         """
-        score = self.model.evaluate(x, y, verbose=0, use_multiprocessing=True)
+        score = self.model.evaluate(x, y, verbose=0)
         if renewal == "acc":
             if score[1] > self.best_score:
                 self.best_score = score[1]
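This is the "evaluate multiprocessing false" part of the commit title: dropping use_multiprocessing=True from model.evaluate falls back to Keras' default single-process evaluation. A small sketch of how the returned score feeds the renewal check shown above (the wrapper function and its argument names are illustrative, not the repository's API):

def score_particle(model, x, y, best_score, renewal="acc"):
    # evaluate() returns [loss, accuracy] because the particle models are
    # compiled with metrics=["accuracy"]; use_multiprocessing is no longer passed.
    score = model.evaluate(x, y, verbose=0)
    if renewal == "acc" and score[1] > best_score:
        best_score = score[1]          # keep the best accuracy seen so far
    return best_score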
@@ -142,10 +139,10 @@ class Particle:
         encode_before, before_sh, before_len = self._encode(weights=self.before_best)
 
         if (encode_before != encode_g).all():
-            self.before_w = w
-            w = w + (self.before_w)
+            self.before_w = w * 0.6
+            w = w + self.before_w
         else:
-            self.before_w *= 0.6
+            self.before_w *= 0.75
             w = w + self.before_w
 
         if self.negative:
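This hunk is the headline change of the commit: when the global best has just moved, the carried term before_w is set to w * 0.6; on steps where it has not moved, before_w now decays by 0.75 per step instead of 0.6, so the extra inertia fades more slowly. A tiny numeric sketch of the effect, under the simplifying assumption that the base inertia weight w stays fixed (in the real code it also moves between w_min and w_max over epochs):

w = 0.7
before_w0 = w * 0.6              # set when the global best changes

for factor in (0.6, 0.75):       # old decay vs. new decay
    carried, trail = before_w0, []
    for _ in range(5):           # five consecutive steps with no new global best
        carried *= factor
        trail.append(round(w + carried, 3))
    print(factor, trail)
# 0.6  -> [0.952, 0.851, 0.791, 0.754, 0.733]
# 0.75 -> [1.015, 0.936, 0.877, 0.833, 0.8]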