Mirror of https://github.com/jung-geun/PSO.git, synced 2025-12-19 20:44:39 +09:00
23-11-01
- When TensorBoard logging is selected, launch the TensorBoard process automatically, scanning for a free port and opening it there
- Temporarily shelve the method of raising inertia by comparing against the previous global best
- Add a digits test
- Add automatic installation of tensorboard
.vscode/settings.json (vendored): 2 changes
@@ -1,6 +1,6 @@
 {
     "[python]": {
-        "editor.defaultFormatter": "ms-python.autopep8"
+        "editor.defaultFormatter": "ms-python.black-formatter"
     },
     "python.formatting.provider": "none"
 }
digits.py (new file): 75 lines
@@ -0,0 +1,75 @@
+import os
+import sys
+
+import pandas as pd
+import tensorflow as tf
+from sklearn.datasets import load_digits
+from sklearn.model_selection import train_test_split
+from tensorflow import keras
+from tensorflow.keras import layers
+from tensorflow.keras.layers import Dense
+from tensorflow.keras.models import Sequential
+from tensorflow.keras.utils import to_categorical
+
+from pso import optimizer
+
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
+
+
+def make_model():
+    model = Sequential()
+    model.add(Dense(12, input_dim=64, activation="relu"))
+    model.add(Dense(8, activation="relu"))
+    model.add(Dense(10, activation="softmax"))
+
+    return model
+
+
+def get_data():
+    digits = load_digits()
+    X = digits.data
+    y = digits.target
+
+    x = X.astype("float32")
+
+    y_class = to_categorical(y)
+
+    x_train, x_test, y_train, y_test = train_test_split(
+        x, y_class, test_size=0.2, random_state=42, shuffle=True
+    )
+    return x_train, x_test, y_train, y_test
+
+
+x_train, x_test, y_train, y_test = get_data()
+model = make_model()
+
+digits_pso = optimizer(
+    model,
+    loss="categorical_crossentropy",
+    n_particles=300,
+    c0=0.5,
+    c1=0.3,
+    w_min=0.2,
+    w_max=0.9,
+    negative_swarm=0,
+    mutation_swarm=0.1,
+    convergence_reset=True,
+    convergence_reset_patience=10,
+    convergence_reset_monitor="acc",
+    convergence_reset_min_delta=0.001,
+)
+
+digits_pso.fit(
+    x_train,
+    y_train,
+    epochs=500,
+    validate_data=(x_test, y_test),
+    log=2,
+    save_info=True,
+    renewal="acc",
+    log_name="digits",
+)
+
+print("Done!")
+
+sys.exit(0)
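Aside, not part of the diff: the new digits.py mirrors the existing iris.py example on scikit-learn's digits dataset, whose 8x8 images flatten to 64 features over 10 classes; that is what fixes input_dim=64 and the Dense(10, softmax) head above. A quick shape check:

    from sklearn.datasets import load_digits

    d = load_digits()
    print(d.data.shape, len(set(d.target)))  # (1797, 64) 10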
iris.py: 32 changes
@@ -1,18 +1,15 @@
-from pso import optimizer
-from tensorflow.keras.models import Sequential
-from tensorflow.keras import layers
-from tensorflow import keras
-from sklearn.model_selection import train_test_split
-from sklearn.datasets import load_iris
-import gc
 import os
 import sys

 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"

+import gc
+
+from sklearn.datasets import load_iris
+from sklearn.model_selection import train_test_split
+from tensorflow import keras
+from tensorflow.keras import layers
+from tensorflow.keras.models import Sequential
+
+from pso import optimizer
+

 def make_model():
     model = Sequential()
@@ -40,22 +37,21 @@ def load_data():
 model = make_model()
 x_train, x_test, y_train, y_test = load_data()

-loss = ["categorical_crossentropy", "mean_squared_error"]
-
 pso_iris = optimizer(
     model,
-    loss=loss[1],
+    loss="categorical_crossentropy",
     n_particles=100,
-    c0=0.35,
-    c1=0.6,
-    w_min=0.5,
+    c0=0.5,
+    c1=0.3,
+    w_min=0.2,
     w_max=0.9,
     negative_swarm=0,
-    mutation_swarm=0.2,
+    mutation_swarm=0.1,
     convergence_reset=True,
     convergence_reset_patience=10,
     convergence_reset_monitor="mse",
-    convergence_reset_min_delta=0.05,
+    convergence_reset_min_delta=0.001,
 )

 best_score = pso_iris.fit(
@@ -65,7 +61,7 @@ best_score = pso_iris.fit(
     save_info=True,
     log=2,
     log_name="iris",
-    renewal="acc",
+    renewal="mse",
     check_point=25,
     validate_data=(x_test, y_test),
 )
mnist.py: 29 changes
@@ -22,10 +22,8 @@ def get_data():
     y_train, y_test = tf.one_hot(y_train, 10), tf.one_hot(y_test, 10)

-    x_train, x_test = tf.convert_to_tensor(
-        x_train), tf.convert_to_tensor(x_test)
-    y_train, y_test = tf.convert_to_tensor(
-        y_train), tf.convert_to_tensor(y_test)
+    x_train, x_test = tf.convert_to_tensor(x_train), tf.convert_to_tensor(x_test)
+    y_train, y_test = tf.convert_to_tensor(y_train), tf.convert_to_tensor(y_test)

     print(f"x_train : {x_train[0].shape} | y_train : {y_train[0].shape}")
     print(f"x_test : {x_test[0].shape} | y_test : {y_test[0].shape}")
@@ -36,8 +34,7 @@ def get_data():
 def make_model():
     model = Sequential()
     model.add(
-        Conv2D(32, kernel_size=(5, 5), activation="relu",
-               input_shape=(28, 28, 1))
+        Conv2D(32, kernel_size=(5, 5), activation="relu", input_shape=(28, 28, 1))
     )
     model.add(MaxPooling2D(pool_size=(2, 2)))
     model.add(Dropout(0.5))
@@ -75,31 +72,31 @@ loss = [
 pso_mnist = optimizer(
     model,
     loss="categorical_crossentropy",
-    n_particles=100,
+    n_particles=500,
     c0=0.5,
-    c1=0.8,
-    w_min=0.6,
+    c1=0.3,
+    w_min=0.2,
     w_max=0.9,
     negative_swarm=0.0,
-    mutation_swarm=0.2,
+    mutation_swarm=0.1,
     convergence_reset=True,
     convergence_reset_patience=10,
-    convergence_reset_monitor="loss",
-    convergence_reset_min_delta=0.05,
+    convergence_reset_monitor="mse",
+    convergence_reset_min_delta=0.005,
 )

 best_score = pso_mnist.fit(
     x_train,
     y_train,
-    epochs=300,
+    epochs=500,
     save_info=True,
     log=2,
     log_name="mnist",
-    renewal="loss",
+    renewal="mse",
     check_point=25,
-    empirical_balance=True,
+    empirical_balance=False,
     dispersion=False,
-    batch_size=2048,
+    batch_size=10000,
     back_propagation=False,
     validate_data=(x_test, y_test),
 )
pso/__init__.py
@@ -1,7 +1,9 @@
 from .optimizer import Optimizer as optimizer
 from .particle import Particle as particle

-__version__ = "1.0.3"
+__version__ = "1.0.4"
+
+print("pso2keras version : " + __version__)

 __all__ = [
     "optimizer",
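Aside, not part of the diff: with the version bump and the import-time banner, loading the package announces itself once per process:

    from pso import optimizer  # prints: pso2keras version : 1.0.4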
pso/optimizer.py: 115 changes
@@ -1,13 +1,17 @@
 import gc
 import json
 import os
+import socket
+import subprocess
 import sys
 from datetime import datetime

 import numpy as np
 import tensorflow as tf
 from tensorboard.plugins.hparams import api as hp
 from tensorflow import keras
 from tqdm.auto import tqdm
+import atexit

 from .particle import Particle
@@ -21,6 +25,14 @@ if gpus:
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"


+def find_free_port():
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    sock.bind(("localhost", 0))
+    port = sock.getsockname()[1]
+    sock.close()
+    return port
+
+
 class Optimizer:
     """
     particle swarm optimization
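Aside, not part of the diff: binding to port 0 asks the OS to hand back an arbitrary unused ephemeral port, which is what implements the "scan for a free port" item in the commit message. Note the port is only reserved while the socket is open, so another process could in principle claim it between sock.close() and the TensorBoard launch further down. Minimal usage sketch:

    port = find_free_port()
    print(f"free port: {port}")  # value varies per run, e.g. 53124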
@@ -33,9 +45,9 @@ class Optimizer:
         loss: any = None,
         n_particles: int = None,
         c0: float = 0.5,
-        c1: float = 1.5,
-        w_min: float = 0.5,
-        w_max: float = 1.5,
+        c1: float = 0.3,
+        w_min: float = 0.2,
+        w_max: float = 0.9,
         negative_swarm: float = 0,
         mutation_swarm: float = 0,
         np_seed: int = None,
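Aside, not part of the diff: the new constructor defaults (c0=0.5, c1=0.3, w_min=0.2, w_max=0.9) match the values now hard-coded in digits.py, iris.py, and mnist.py above, so an optimizer(...) call that omits these keywords reproduces the example configuration.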
@@ -94,11 +106,7 @@ class Optimizer:
         if random_state is not None:
             np.random.set_state(random_state)

-        model.compile(
-            loss=loss,
-            optimizer="adam",
-            metrics=["accuracy", "mse"]
-        )
+        model.compile(loss=loss, optimizer="adam", metrics=["accuracy", "mse"])
         self.model = model  # model structure
         self.loss = loss  # loss function
         self.n_particles = n_particles  # number of particles
@@ -124,11 +132,12 @@ class Optimizer:
         print(f"start running time : {self.day}")
         for i in tqdm(range(self.n_particles), desc="Initializing Particles"):
             self.particles[i] = Particle(
                 model,
                 self.loss,
-                negative=True if i < self.negative_swarm * self.n_particles else False,
+                negative=True
+                if i < self.negative_swarm * self.n_particles
+                else False,
                 mutation=self.mutation_swarm,
                 converge_reset=convergence_reset,
                 converge_reset_patience=convergence_reset_patience,
@@ -308,8 +317,9 @@ class Optimizer:
     def __getBatchSlice(self, batch_size):
         return list(
-            tf.data.Dataset.from_tensor_slices(
-                (self.x, self.y)).shuffle(len(self.x)).batch(batch_size)
+            tf.data.Dataset.from_tensor_slices((self.x, self.y))
+            .shuffle(len(self.x))
+            .batch(batch_size)
         )

     def getDataset(self):
@@ -353,7 +363,13 @@ class Optimizer:
             raise ValueError("x, y shape error")

         if log not in [0, 1, 2]:
-            raise ValueError("log not in [0, 1, 2]")
+            raise ValueError(
+                """log not in [0, 1, 2]
+                0 : no logging
+                1 : csv
+                2 : tensorboard
+                """
+            )

         if renewal not in ["acc", "loss", "mse"]:
             raise ValueError("renewal not in ['acc', 'loss', 'mse']")
@@ -394,7 +410,19 @@ class Optimizer:
                 self.train_summary_writer[i] = tf.summary.create_file_writer(
                     train_log_dir + f"/{i}"
                 )
+
+            port = find_free_port()
+            tensorboard_precess = subprocess.Popen(
+                [
+                    "tensorboard",
+                    "--logdir",
+                    self.log_path,
+                    "--port",
+                    str(port),
+                ]
+            )
+            tensorboard_url = f"http://localhost:{port}"
+            print(f"tensorboard url : {tensorboard_url}")
+            atexit.register(tensorboard_precess.kill)
         elif check_point is not None or log == 1:
             if not os.path.exists(self.log_path):
                 os.makedirs(self.log_path, exist_ok=True)
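Aside, not part of the diff: atexit.register(tensorboard_precess.kill) ties the TensorBoard child's lifetime to the training process, so the server is cleaned up on any normal interpreter exit (not, however, after a SIGKILL). The same standard-library pattern in isolation:

    import atexit
    import subprocess

    # launch a long-lived child; 6006 is TensorBoard's default port
    proc = subprocess.Popen(["tensorboard", "--logdir", "./logs", "--port", "6006"])
    atexit.register(proc.kill)  # invoked automatically at interpreter exit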
@@ -408,7 +436,7 @@ class Optimizer:
             model_.compile(
                 loss=self.loss,
                 optimizer="adam",
-                metrics=["accuracy", "mse"]
+                metrics=["accuracy", "mse"],
             )
             model_.fit(x, y, epochs=1, verbose=0)
             score = model_.evaluate(x, y, verbose=1)
@@ -463,8 +491,7 @@ class Optimizer:
                 if dispersion:
                     ts = weight_min + np.random.rand() * (weight_max - weight_min)

-                    g_, g_sh, g_len = self._encode(
-                        Particle.g_best_weights)
+                    g_, g_sh, g_len = self._encode(Particle.g_best_weights)
                     decrement = (epochs - epoch + 1) / epochs
                     g_ = (1 - decrement) * g_ + decrement * ts
                     g_best = self._decode_(g_, g_sh, g_len)
@@ -472,8 +499,7 @@ class Optimizer:
                 if empirical_balance:
                     if np.random.rand() < np.exp(-(epoch) / epochs):
                         w_p_ = self._f(
-                            x_batch, y_batch, self.particles[i].get_best_weights(
-                            )
+                            x_batch, y_batch, self.particles[i].get_best_weights()
                         )
                         w_g_ = self._f(x_batch, y_batch, g_best)
                         w_p = w_p_ / (w_p_ + w_g_)
@@ -491,9 +517,7 @@ class Optimizer:
                             1
                             / (
                                 self.n_particles
-                                * np.linalg.norm(
-                                    weight_min - weight_max
-                                )
+                                * np.linalg.norm(weight_min - weight_max)
                             )
                             * sigma_post
                         )
@@ -543,9 +567,7 @@ class Optimizer:
                 if log == 2:
                     with self.train_summary_writer[i].as_default():
                         tf.summary.scalar("loss", score[0], step=epoch + 1)
-                        tf.summary.scalar(
-                            "accuracy", score[1], step=epoch + 1
-                        )
+                        tf.summary.scalar("accuracy", score[1], step=epoch + 1)
                         tf.summary.scalar("mse", score[2], step=epoch + 1)

                 if renewal == "loss":
@@ -554,6 +576,11 @@ class Optimizer:
                     # update each score
                     min_loss, max_acc, min_mse = score

                     best_particle_index = i
+                elif score[0] == min_loss:
+                    if score[1] > max_acc:
+                        min_loss, max_acc, min_mse = score
+
+                        best_particle_index = i
             elif renewal == "acc":
                 # if higher than or equal to the best score
@@ -562,14 +589,22 @@ class Optimizer:
                     min_loss, max_acc, min_mse = score

                     best_particle_index = i
+                elif score[1] == max_acc:
+                    if score[2] < min_mse:
+                        min_loss, max_acc, min_mse = score
+
+                        best_particle_index = i
+
             elif renewal == "mse":
                 if score[2] < min_mse:
                     min_loss, max_acc, min_mse = score

                     best_particle_index = i
+                elif score[2] == min_mse:
+                    if score[1] > max_acc:
+                        min_loss, max_acc, min_mse = score
+
+                        best_particle_index = i
             particle_sum += score[1]

             if log == 1:
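Aside, not part of the diff: the added elif branches implement a lexicographic tie-break, ranking by the primary metric first and then a secondary one. A hypothetical restatement of the renewal="acc" case as a tuple comparison, with scores as (loss, acc, mse):

    scores = [(0.9, 0.62, 0.08), (1.1, 0.62, 0.05), (1.0, 0.60, 0.04)]
    # maximize accuracy, break ties by lower mse
    best = max(range(len(scores)), key=lambda i: (scores[i][1], -scores[i][2]))
    print(best)  # 1: ties particle 0 on accuracy but has the lower mse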
@@ -587,34 +622,28 @@ class Optimizer:
             if renewal == "loss":
-                if min_loss <= Particle.g_best_score[0]:
-                    self.particles[best_particle_index].update_global_best(
-                    )
+                if min_loss < Particle.g_best_score[0]:
+                    self.particles[best_particle_index].update_global_best()
                 else:
                     if max_acc > Particle.g_best_score[1]:
-                        self.particles[best_particle_index].update_global_best(
-                        )
+                        self.particles[best_particle_index].update_global_best()
             elif renewal == "acc":
-                if max_acc >= Particle.g_best_score[1]:
+                # if strictly above the best score
+                if max_acc > Particle.g_best_score[1]:
                     # update the best score
-                    self.particles[best_particle_index].update_global_best(
-                    )
+                    self.particles[best_particle_index].update_global_best()
+                # if equal to the best score
                 else:
+                    # if below the lowest loss
                     if min_loss < Particle.g_best_score[0]:
-                        self.particles[best_particle_index].update_global_best(
-                        )
+                        self.particles[best_particle_index].update_global_best()
             elif renewal == "mse":
-                if min_mse <= Particle.g_best_score[2]:
-                    self.particles[best_particle_index].update_global_best(
-                    )
+                if min_mse < Particle.g_best_score[2]:
+                    self.particles[best_particle_index].update_global_best()
                 else:
                     if max_acc > Particle.g_best_score[1]:
-                        self.particles[best_particle_index].update_global_best(
-                        )
+                        self.particles[best_particle_index].update_global_best()
             # update the best score
             epochs_pbar.set_description(
                 f"best - loss: {Particle.g_best_score[0]:.4f} - acc: {Particle.g_best_score[1]:.4f} - mse: {Particle.g_best_score[2]:.4f}"
@@ -623,9 +652,12 @@ class Optimizer:
             if check_point is not None:
                 if epoch % check_point == 0:
-                    os.makedirs(
-                        f"./logs/{log_name}/{self.day}", exist_ok=True)
-                    self._check_point_save(
-                        f"./logs/{log_name}/{self.day}/ckpt-{epoch}")
+                    os.makedirs(
+                        f"./logs/{log_name}/{self.day}",
+                        exist_ok=True,
+                    )
+                    self._check_point_save(
+                        f"./logs/{log_name}/{self.day}/ckpt-{epoch}"
+                    )

             tf.keras.backend.reset_uids()
             tf.keras.backend.clear_session()
@@ -661,7 +693,7 @@ class Optimizer:
         model.compile(
             loss=self.loss,
             optimizer="adam",
-            metrics=["accuracy", "mse"]
+            metrics=["accuracy", "mse"],
         )

         return model
@@ -741,8 +773,7 @@ class Optimizer:
         x, y = valid_data
         model = self.get_best_model()
         score = model.evaluate(x, y, verbose=1)
-        print(
-            f"model score - loss: {score[0]} - acc: {score[1]} - mse: {score[2]}")
+        print(f"model score - loss: {score[0]} - acc: {score[1]} - mse: {score[2]}")
         model.save(
             f"./{self.log_path}/model_{score[0 if self.renewal == 'loss' else 1 if self.renewal == 'acc' else 2 ]}.h5"
         )
pso/particle.py
@@ -54,11 +54,11 @@ class Particle:
         self.__reset_particle()
         self.best_weights = self.model.get_weights()
-        self.before_best = self.model.get_weights()
+        # self.before_best = self.model.get_weights()
         self.negative = negative
         self.mutation = mutation
         self.best_score = [np.inf, 0, np.inf]
-        self.before_w = 0
+        # self.before_w = 0
         self.score_history = []
         self.converge_reset = converge_reset
         self.converge_reset_patience = converge_reset_patience
@@ -196,7 +196,7 @@ class Particle:
         del i_w_, i_s, i_l
         self.score_history = []

-    def _update_velocity(self, local_rate, global_rate, w):
+    def _velocity_calculation(self, local_rate, global_rate, w):
         """
         Update the current velocity

@@ -209,20 +209,20 @@
         encode_v, v_sh, v_len = self._encode(weights=self.velocities)
         encode_p, p_sh, p_len = self._encode(weights=self.best_weights)
         encode_g, g_sh, g_len = self._encode(weights=Particle.g_best_weights)
-        encode_before, before_sh, before_len = self._encode(
-            weights=self.before_best
-        )
+        # encode_before, before_sh, before_len = self._encode(
+        #     weights=self.before_best
+        # )
         r_0 = np.random.rand()
         r_1 = np.random.rand()

         # if the previous global best differs from the current one, boost inertia momentarily - when the value changes, keep the old inertia for a while
-        if not np.array_equal(encode_before, encode_g, equal_nan=True):
+        # if not np.array_equal(encode_before, encode_g, equal_nan=True):
             # increase inertia to 1.5x the previous weight importance
-            self.before_w = w * 0.5
-            w = w + self.before_w
-        else:
-            self.before_w *= 0.75
-            w = w + self.before_w
+        #     self.before_w = w * 0.5
+        #     w = w + self.before_w
+        # else:
+        #     self.before_w *= 0.75
+        #     w = w + self.before_w

         if self.negative:
             # use the local and global best as negatives to induce global exploration
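Aside, not part of the diff: with the inertia-boost branch commented out, _velocity_calculation reduces to the canonical PSO update over the flattened weight vectors from _encode: v = w*v + c0*r0*(p_best - x) + c1*r1*(g_best - x), with the best-weight terms apparently negated for negative-swarm particles. A minimal sketch (hypothetical helper, not the library's code), assuming local_rate and global_rate play the roles of c0 and c1:

    import numpy as np

    def pso_velocity(v, x, p_best, g_best, w, c0, c1):
        r0, r1 = np.random.rand(), np.random.rand()
        return w * v + c0 * r0 * (p_best - x) + c1 * r1 * (g_best - x)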
@@ -251,10 +251,9 @@
         del encode_v, v_sh, v_len
         del encode_p, p_sh, p_len
         del encode_g, g_sh, g_len
-        del encode_before, before_sh, before_len
         del r_0, r_1

-    def _update_velocity_w(self, local_rate, global_rate, w, w_p, w_g):
+    def _velocity_calculation_w(self, local_rate, global_rate, w, w_p, w_g):
         """
         Update the current velocity
         A variant of the basic update that disperses the local and global best to prevent premature convergence
@@ -270,18 +269,18 @@
         encode_v, v_sh, v_len = self._encode(weights=self.velocities)
         encode_p, p_sh, p_len = self._encode(weights=self.best_weights)
         encode_g, g_sh, g_len = self._encode(weights=Particle.g_best_weights)
-        encode_before, before_sh, before_len = self._encode(
-            weights=self.before_best
-        )
+        # encode_before, before_sh, before_len = self._encode(
+        #     weights=self.before_best
+        # )
         r_0 = np.random.rand()
         r_1 = np.random.rand()

-        if not np.array_equal(encode_before, encode_g, equal_nan=True):
-            self.before_w = w * 0.5
-            w = w + self.before_w
-        else:
-            self.before_w *= 0.75
-            w = w + self.before_w
+        # if not np.array_equal(encode_before, encode_g, equal_nan=True):
+        #     self.before_w = w * 0.5
+        #     w = w + self.before_w
+        # else:
+        #     self.before_w *= 0.75
+        #     w = w + self.before_w

         if self.negative:
             new_v = (
@@ -306,10 +305,9 @@
         del encode_v, v_sh, v_len
         del encode_p, p_sh, p_len
         del encode_g, g_sh, g_len
-        del encode_before, before_sh, before_len
         del r_0, r_1

-    def _update_weights(self):
+    def _position_update(self):
         """
         Update the weights
         """
@@ -337,8 +335,8 @@
         Returns:
             list: the current particle's score
         """
-        self._update_velocity(local_rate, global_rate, w)
-        self._update_weights()
+        self._velocity_calculation(local_rate, global_rate, w)
+        self._position_update()

         score = self.get_score(x, y, renewal)
@@ -385,8 +383,8 @@
         Returns:
             float: the current particle's score
         """
-        self._update_velocity_w(local_rate, global_rate, w, w_p, w_g)
-        self._update_weights()
+        self._velocity_calculation_w(local_rate, global_rate, w, w_p, w_g)
+        self._position_update()

         score = self.get_score(x, y, renewal)
requirements.txt
@@ -1,6 +1,8 @@
 ipython
-keras<=2.11.0
+keras
 numpy
-pandas==1.5.3
-tensorflow<=2.11.1
-tqdm==4.65.0
+pandas
+tensorflow
+tqdm
+scikit-learn
+tensorboard
setup.py: 8 changes
@@ -16,8 +16,11 @@ setup(
         "numpy",
         "pandas",
         "ipython",
-        "tensorflow<=2.11.1,>=2.8.0",
-        "keras<=2.11.1,>=2.8.0",
         "matplotlib",
+        "tensorflow",
+        "keras",
+        "scikit-learn",
+        "tensorboard",
     ],
     packages=find_packages(exclude=[]),
     keywords=["pso", "tensorflow", "keras"],
@@ -33,5 +36,6 @@ setup(
         "Programming Language :: Python :: 3.8",
         "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
+        "Programming Language :: Python :: 3.11",
     ],
 )