Automatically launch a TensorBoard process when tensorboard logging is selected
Automatically find and open a free port for it
Temporarily retire the mechanism that boosts inertia when the global best differs from the previous optimum
Add a digits test
Add tensorboard to the automatically installed dependencies
jung-geun
2023-11-01 23:40:31 +09:00
parent 389027409d
commit 80695f304d
9 changed files with 218 additions and 113 deletions
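
The headline change: when `log=2` (TensorBoard) is selected, the optimizer now probes for a free port, launches a `tensorboard` subprocess on it, and kills it when the training process exits. A minimal standalone sketch of the pattern, distilled from the pso/optimizer.py hunk below (`launch_tensorboard` is a hypothetical wrapper, not part of the package):

import atexit
import socket
import subprocess


def find_free_port():
    # Bind to port 0 so the OS hands out an unused port, then release it.
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(("localhost", 0))
    port = sock.getsockname()[1]
    sock.close()
    return port


def launch_tensorboard(logdir):
    # Spawn TensorBoard on that port and tie its lifetime to this process.
    port = find_free_port()
    process = subprocess.Popen(["tensorboard", "--logdir", logdir, "--port", str(port)])
    atexit.register(process.kill)
    print(f"tensorboard url : http://localhost:{port}")
    return process

Note the inherent race: the port is released before TensorBoard rebinds it, so another process could grab it in between; for a local training run this is usually acceptable.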

.vscode/settings.json

@@ -1,6 +1,6 @@
 {
     "[python]": {
-        "editor.defaultFormatter": "ms-python.autopep8"
+        "editor.defaultFormatter": "ms-python.black-formatter"
     },
     "python.formatting.provider": "none"
 }

digits.py (new file)

@@ -0,0 +1,75 @@
import os
import sys

import pandas as pd
import tensorflow as tf
from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential
from tensorflow.keras.utils import to_categorical

from pso import optimizer

os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"


def make_model():
    model = Sequential()
    model.add(Dense(12, input_dim=64, activation="relu"))
    model.add(Dense(8, activation="relu"))
    model.add(Dense(10, activation="softmax"))

    return model


def get_data():
    digits = load_digits()
    X = digits.data
    y = digits.target

    x = X.astype("float32")
    y_class = to_categorical(y)

    x_train, x_test, y_train, y_test = train_test_split(
        x, y_class, test_size=0.2, random_state=42, shuffle=True
    )

    return x_train, x_test, y_train, y_test


x_train, x_test, y_train, y_test = get_data()
model = make_model()

digits_pso = optimizer(
    model,
    loss="categorical_crossentropy",
    n_particles=300,
    c0=0.5,
    c1=0.3,
    w_min=0.2,
    w_max=0.9,
    negative_swarm=0,
    mutation_swarm=0.1,
    convergence_reset=True,
    convergence_reset_patience=10,
    convergence_reset_monitor="acc",
    convergence_reset_min_delta=0.001,
)

digits_pso.fit(
    x_train,
    y_train,
    epochs=500,
    validate_data=(x_test, y_test),
    log=2,
    save_info=True,
    renewal="acc",
    log_name="digits",
)

print("Done!")
sys.exit(0)
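
Once `fit()` returns, the best swarm member can be pulled out and scored on the held-out split; a short sketch using `get_best_model()` from pso/optimizer.py (the metric order loss/accuracy/mse follows the compile call in this commit):

best_model = digits_pso.get_best_model()
loss, acc, mse = best_model.evaluate(x_test, y_test, verbose=0)
print(f"digits - loss: {loss:.4f} - acc: {acc:.4f} - mse: {mse:.4f}")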

iris.py

@@ -1,18 +1,15 @@
-from pso import optimizer
-from tensorflow.keras.models import Sequential
-from tensorflow.keras import layers
-from tensorflow import keras
-from sklearn.model_selection import train_test_split
-from sklearn.datasets import load_iris
-import gc
 import os
 import sys
 
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 
+import gc
+
+from sklearn.datasets import load_iris
+from sklearn.model_selection import train_test_split
+from tensorflow import keras
+from tensorflow.keras import layers
+from tensorflow.keras.models import Sequential
+
+from pso import optimizer
 
 def make_model():
     model = Sequential()
@@ -40,22 +37,21 @@ def load_data():
 model = make_model()
 x_train, x_test, y_train, y_test = load_data()
 
-loss = ["categorical_crossentropy", "mean_squared_error"]
 
 pso_iris = optimizer(
     model,
-    loss=loss[1],
+    loss="categorical_crossentropy",
     n_particles=100,
-    c0=0.35,
-    c1=0.6,
-    w_min=0.5,
+    c0=0.5,
+    c1=0.3,
+    w_min=0.2,
     w_max=0.9,
     negative_swarm=0,
-    mutation_swarm=0.2,
+    mutation_swarm=0.1,
     convergence_reset=True,
     convergence_reset_patience=10,
     convergence_reset_monitor="mse",
-    convergence_reset_min_delta=0.05,
+    convergence_reset_min_delta=0.001,
 )
 
 best_score = pso_iris.fit(
@@ -65,7 +61,7 @@ best_score = pso_iris.fit(
     save_info=True,
     log=2,
     log_name="iris",
-    renewal="acc",
+    renewal="mse",
     check_point=25,
     validate_data=(x_test, y_test),
 )

mnist.py

@@ -22,10 +22,8 @@ def get_data():
     y_train, y_test = tf.one_hot(y_train, 10), tf.one_hot(y_test, 10)
 
-    x_train, x_test = tf.convert_to_tensor(
-        x_train), tf.convert_to_tensor(x_test)
-    y_train, y_test = tf.convert_to_tensor(
-        y_train), tf.convert_to_tensor(y_test)
+    x_train, x_test = tf.convert_to_tensor(x_train), tf.convert_to_tensor(x_test)
+    y_train, y_test = tf.convert_to_tensor(y_train), tf.convert_to_tensor(y_test)
 
     print(f"x_train : {x_train[0].shape} | y_train : {y_train[0].shape}")
     print(f"x_test : {x_test[0].shape} | y_test : {y_test[0].shape}")
@@ -36,8 +34,7 @@ def get_data():
 def make_model():
     model = Sequential()
     model.add(
-        Conv2D(32, kernel_size=(5, 5), activation="relu",
-               input_shape=(28, 28, 1))
+        Conv2D(32, kernel_size=(5, 5), activation="relu", input_shape=(28, 28, 1))
     )
     model.add(MaxPooling2D(pool_size=(2, 2)))
     model.add(Dropout(0.5))
@@ -75,31 +72,31 @@ loss = [
 pso_mnist = optimizer(
     model,
     loss="categorical_crossentropy",
-    n_particles=100,
+    n_particles=500,
     c0=0.5,
-    c1=0.8,
-    w_min=0.6,
+    c1=0.3,
+    w_min=0.2,
     w_max=0.9,
     negative_swarm=0.0,
-    mutation_swarm=0.2,
+    mutation_swarm=0.1,
     convergence_reset=True,
     convergence_reset_patience=10,
-    convergence_reset_monitor="loss",
-    convergence_reset_min_delta=0.05,
+    convergence_reset_monitor="mse",
+    convergence_reset_min_delta=0.005,
 )
 
 best_score = pso_mnist.fit(
     x_train,
     y_train,
-    epochs=300,
+    epochs=500,
     save_info=True,
     log=2,
     log_name="mnist",
-    renewal="loss",
+    renewal="mse",
     check_point=25,
-    empirical_balance=True,
+    empirical_balance=False,
     dispersion=False,
-    batch_size=2048,
+    batch_size=10000,
     back_propagation=False,
     validate_data=(x_test, y_test),
 )

pso/__init__.py

@@ -1,7 +1,9 @@
 from .optimizer import Optimizer as optimizer
 from .particle import Particle as particle
 
-__version__ = "1.0.3"
+__version__ = "1.0.4"
+
+print("pso2keras version : " + __version__)
 
 __all__ = [
     "optimizer",

pso/optimizer.py

@@ -1,13 +1,17 @@
import gc import gc
import json import json
import os import os
import socket
import subprocess
import sys import sys
from datetime import datetime from datetime import datetime
import numpy as np import numpy as np
import tensorflow as tf import tensorflow as tf
from tensorboard.plugins.hparams import api as hp
from tensorflow import keras from tensorflow import keras
from tqdm.auto import tqdm from tqdm.auto import tqdm
import atexit
from .particle import Particle from .particle import Particle
@@ -21,6 +25,14 @@ if gpus:
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 
 
+def find_free_port():
+    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+    sock.bind(("localhost", 0))
+    port = sock.getsockname()[1]
+    sock.close()
+    return port
+
+
 class Optimizer:
     """
     particle swarm optimization
@@ -33,9 +45,9 @@ class Optimizer:
         loss: any = None,
         n_particles: int = None,
         c0: float = 0.5,
-        c1: float = 1.5,
-        w_min: float = 0.5,
-        w_max: float = 1.5,
+        c1: float = 0.3,
+        w_min: float = 0.2,
+        w_max: float = 0.9,
         negative_swarm: float = 0,
         mutation_swarm: float = 0,
         np_seed: int = None,
@@ -94,11 +106,7 @@ class Optimizer:
         if random_state is not None:
             np.random.set_state(random_state)
 
-        model.compile(
-            loss=loss,
-            optimizer="adam",
-            metrics=["accuracy", "mse"]
-        )
+        model.compile(loss=loss, optimizer="adam", metrics=["accuracy", "mse"])
 
         self.model = model  # model architecture
         self.loss = loss  # loss function
         self.n_particles = n_particles  # number of particles
@@ -124,11 +132,12 @@ class Optimizer:
         print(f"start running time : {self.day}")
 
         for i in tqdm(range(self.n_particles), desc="Initializing Particles"):
             self.particles[i] = Particle(
                 model,
                 self.loss,
-                negative=True if i < self.negative_swarm * self.n_particles else False,
+                negative=True
+                if i < self.negative_swarm * self.n_particles
+                else False,
                 mutation=self.mutation_swarm,
                 converge_reset=convergence_reset,
                 converge_reset_patience=convergence_reset_patience,
@@ -308,8 +317,9 @@ class Optimizer:
     def __getBatchSlice(self, batch_size):
         return list(
-            tf.data.Dataset.from_tensor_slices(
-                (self.x, self.y)).shuffle(len(self.x)).batch(batch_size)
+            tf.data.Dataset.from_tensor_slices((self.x, self.y))
+            .shuffle(len(self.x))
+            .batch(batch_size)
         )
 
     def getDataset(self):
@@ -353,7 +363,13 @@ class Optimizer:
             raise ValueError("x, y shape error")
         if log not in [0, 1, 2]:
-            raise ValueError("log not in [0, 1, 2]")
+            raise ValueError(
+                """log not in [0, 1, 2]
+                0 : no logging
+                1 : csv
+                2 : tensorboard
+                """
+            )
         if renewal not in ["acc", "loss", "mse"]:
             raise ValueError("renewal not in ['acc', 'loss', 'mse']")
@@ -394,7 +410,19 @@ class Optimizer:
                 self.train_summary_writer[i] = tf.summary.create_file_writer(
                     train_log_dir + f"/{i}"
                 )
+            port = find_free_port()
+            tensorboard_process = subprocess.Popen(
+                [
+                    "tensorboard",
+                    "--logdir",
+                    self.log_path,
+                    "--port",
+                    str(port),
+                ]
+            )
+            tensorboard_url = f"http://localhost:{port}"
+            print(f"tensorboard url : {tensorboard_url}")
+            atexit.register(tensorboard_process.kill)
         elif check_point is not None or log == 1:
             if not os.path.exists(self.log_path):
                 os.makedirs(self.log_path, exist_ok=True)
@@ -408,7 +436,7 @@ class Optimizer:
             model_.compile(
                 loss=self.loss,
                 optimizer="adam",
-                metrics=["accuracy", "mse"]
+                metrics=["accuracy", "mse"],
             )
             model_.fit(x, y, epochs=1, verbose=0)
             score = model_.evaluate(x, y, verbose=1)
@@ -463,8 +491,7 @@ class Optimizer:
                     if dispersion:
                         ts = weight_min + np.random.rand() * (weight_max - weight_min)
-                        g_, g_sh, g_len = self._encode(
-                            Particle.g_best_weights)
+                        g_, g_sh, g_len = self._encode(Particle.g_best_weights)
                         decrement = (epochs - epoch + 1) / epochs
                         g_ = (1 - decrement) * g_ + decrement * ts
                         g_best = self._decode_(g_, g_sh, g_len)
@@ -472,8 +499,7 @@ class Optimizer:
                     if empirical_balance:
                         if np.random.rand() < np.exp(-(epoch) / epochs):
                             w_p_ = self._f(
-                                x_batch, y_batch, self.particles[i].get_best_weights(
-                                )
+                                x_batch, y_batch, self.particles[i].get_best_weights()
                             )
                             w_g_ = self._f(x_batch, y_batch, g_best)
                             w_p = w_p_ / (w_p_ + w_g_)
@@ -491,9 +517,7 @@ class Optimizer:
                                 1
                                 / (
                                     self.n_particles
-                                    * np.linalg.norm(
-                                        weight_min - weight_max
-                                    )
+                                    * np.linalg.norm(weight_min - weight_max)
                                 )
                                 * sigma_post
                             )
@@ -543,9 +567,7 @@ class Optimizer:
                     if log == 2:
                         with self.train_summary_writer[i].as_default():
                             tf.summary.scalar("loss", score[0], step=epoch + 1)
-                            tf.summary.scalar(
-                                "accuracy", score[1], step=epoch + 1
-                            )
+                            tf.summary.scalar("accuracy", score[1], step=epoch + 1)
                             tf.summary.scalar("mse", score[2], step=epoch + 1)
 
                     if renewal == "loss":
@@ -555,6 +577,11 @@ class Optimizer:
                             min_loss, max_acc, min_mse = score
                             best_particle_index = i
+                        elif score[0] == min_loss:
+                            if score[1] > max_acc:
+                                min_loss, max_acc, min_mse = score
+                                best_particle_index = i
 
                     elif renewal == "acc":
                         # if the score is higher than or equal to the current best
                         if score[1] > max_acc:
@@ -562,14 +589,22 @@ class Optimizer:
                             min_loss, max_acc, min_mse = score
                             best_particle_index = i
+                        elif score[1] == max_acc:
+                            if score[2] < min_mse:
+                                min_loss, max_acc, min_mse = score
+                                best_particle_index = i
 
                     elif renewal == "mse":
                         if score[2] < min_mse:
                             min_loss, max_acc, min_mse = score
                             best_particle_index = i
+                        elif score[2] == min_mse:
+                            if score[1] > max_acc:
+                                min_loss, max_acc, min_mse = score
+                                best_particle_index = i
 
                     particle_sum += score[1]
 
                     if log == 1:
@@ -587,34 +622,28 @@ class Optimizer:
             if renewal == "loss":
                 if min_loss <= Particle.g_best_score[0]:
                     if min_loss < Particle.g_best_score[0]:
-                        self.particles[best_particle_index].update_global_best(
-                        )
+                        self.particles[best_particle_index].update_global_best()
                     else:
                         if max_acc > Particle.g_best_score[1]:
-                            self.particles[best_particle_index].update_global_best(
-                            )
+                            self.particles[best_particle_index].update_global_best()
             elif renewal == "acc":
                 if max_acc >= Particle.g_best_score[1]:
                     # strictly higher than the best score
                     if max_acc > Particle.g_best_score[1]:
                         # update the best score
-                        self.particles[best_particle_index].update_global_best(
-                        )
+                        self.particles[best_particle_index].update_global_best()
                     # equal to the best score
                     else:
                         # lower than the lowest loss
                         if min_loss < Particle.g_best_score[0]:
-                            self.particles[best_particle_index].update_global_best(
-                            )
+                            self.particles[best_particle_index].update_global_best()
             elif renewal == "mse":
                 if min_mse <= Particle.g_best_score[2]:
                     if min_mse < Particle.g_best_score[2]:
-                        self.particles[best_particle_index].update_global_best(
-                        )
+                        self.particles[best_particle_index].update_global_best()
                     else:
                         if max_acc > Particle.g_best_score[1]:
-                            self.particles[best_particle_index].update_global_best(
-                            )
+                            self.particles[best_particle_index].update_global_best()
 
             # report the updated best score
             epochs_pbar.set_description(
                 f"best - loss: {Particle.g_best_score[0]:.4f} - acc: {Particle.g_best_score[1]:.4f} - mse: {Particle.g_best_score[2]:.4f}"
@@ -623,9 +652,12 @@ class Optimizer:
             if check_point is not None:
                 if epoch % check_point == 0:
                     os.makedirs(
-                        f"./logs/{log_name}/{self.day}", exist_ok=True)
+                        f"./logs/{log_name}/{self.day}",
+                        exist_ok=True,
+                    )
                     self._check_point_save(
-                        f"./logs/{log_name}/{self.day}/ckpt-{epoch}")
+                        f"./logs/{log_name}/{self.day}/ckpt-{epoch}"
+                    )
 
             tf.keras.backend.reset_uids()
             tf.keras.backend.clear_session()
@@ -661,7 +693,7 @@ class Optimizer:
         model.compile(
             loss=self.loss,
             optimizer="adam",
-            metrics=["accuracy", "mse"]
+            metrics=["accuracy", "mse"],
         )
 
         return model
@@ -741,8 +773,7 @@ class Optimizer:
             x, y = valid_data
             model = self.get_best_model()
             score = model.evaluate(x, y, verbose=1)
-            print(
-                f"model score - loss: {score[0]} - acc: {score[1]} - mse: {score[2]}")
+            print(f"model score - loss: {score[0]} - acc: {score[1]} - mse: {score[2]}")
             model.save(
                 f"./{self.log_path}/model_{score[0 if self.renewal == 'loss' else 1 if self.renewal == 'acc' else 2 ]}.h5"
             )
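
The other behavioural change in this file is the pair of `elif score[...] ==` branches added to the renewal comparison above: a tie on the monitored metric is now broken by a secondary metric instead of keeping the first particle encountered. Condensed into a hypothetical helper (the real code updates min_loss/max_acc/min_mse in place):

def is_better(score, best, renewal):
    # score/best are (loss, acc, mse) triples as returned by model.evaluate.
    loss, acc, mse = score
    b_loss, b_acc, b_mse = best
    if renewal == "loss":  # lower loss wins; ties broken by higher accuracy
        return loss < b_loss or (loss == b_loss and acc > b_acc)
    if renewal == "acc":  # higher accuracy wins; ties broken by lower mse
        return acc > b_acc or (acc == b_acc and mse < b_mse)
    if renewal == "mse":  # lower mse wins; ties broken by higher accuracy
        return mse < b_mse or (mse == b_mse and acc > b_acc)
    raise ValueError("renewal not in ['acc', 'loss', 'mse']")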

pso/particle.py

@@ -54,11 +54,11 @@ class Particle:
         self.__reset_particle()
 
         self.best_weights = self.model.get_weights()
-        self.before_best = self.model.get_weights()
+        # self.before_best = self.model.get_weights()
         self.negative = negative
         self.mutation = mutation
         self.best_score = [np.inf, 0, np.inf]
-        self.before_w = 0
+        # self.before_w = 0
         self.score_history = []
         self.converge_reset = converge_reset
         self.converge_reset_patience = converge_reset_patience
@@ -196,7 +196,7 @@ class Particle:
             del i_w_, i_s, i_l
             self.score_history = []
 
-    def _update_velocity(self, local_rate, global_rate, w):
+    def _velocity_calculation(self, local_rate, global_rate, w):
         """
         update the current velocity
@@ -209,20 +209,20 @@ class Particle:
         encode_v, v_sh, v_len = self._encode(weights=self.velocities)
         encode_p, p_sh, p_len = self._encode(weights=self.best_weights)
         encode_g, g_sh, g_len = self._encode(weights=Particle.g_best_weights)
-        encode_before, before_sh, before_len = self._encode(
-            weights=self.before_best
-        )
+        # encode_before, before_sh, before_len = self._encode(
+        #     weights=self.before_best
+        # )
 
         r_0 = np.random.rand()
         r_1 = np.random.rand()
 
         # if the previous and current global best differ, momentarily raise the inertia - when the value changes, keep the old inertia for a period
-        if not np.array_equal(encode_before, encode_g, equal_nan=True):
-            # raise the inertia by 1.5x the previous weight importance
-            self.before_w = w * 0.5
-            w = w + self.before_w
-        else:
-            self.before_w *= 0.75
-            w = w + self.before_w
+        # if not np.array_equal(encode_before, encode_g, equal_nan=True):
+        #     # raise the inertia by 1.5x the previous weight importance
+        #     self.before_w = w * 0.5
+        #     w = w + self.before_w
+        # else:
+        #     self.before_w *= 0.75
+        #     w = w + self.before_w
 
         if self.negative:
             # use the local and global best as negatives to encourage global exploration
@@ -251,10 +251,9 @@ class Particle:
         del encode_v, v_sh, v_len
         del encode_p, p_sh, p_len
         del encode_g, g_sh, g_len
-        del encode_before, before_sh, before_len
         del r_0, r_1
-    def _update_velocity_w(self, local_rate, global_rate, w, w_p, w_g):
+    def _velocity_calculation_w(self, local_rate, global_rate, w, w_p, w_g):
         """
         update the current velocity
         a variant of the basic update that disperses the local and global best to prevent premature convergence
@@ -270,18 +269,18 @@ class Particle:
         encode_v, v_sh, v_len = self._encode(weights=self.velocities)
         encode_p, p_sh, p_len = self._encode(weights=self.best_weights)
         encode_g, g_sh, g_len = self._encode(weights=Particle.g_best_weights)
-        encode_before, before_sh, before_len = self._encode(
-            weights=self.before_best
-        )
+        # encode_before, before_sh, before_len = self._encode(
+        #     weights=self.before_best
+        # )
 
         r_0 = np.random.rand()
         r_1 = np.random.rand()
 
-        if not np.array_equal(encode_before, encode_g, equal_nan=True):
-            self.before_w = w * 0.5
-            w = w + self.before_w
-        else:
-            self.before_w *= 0.75
-            w = w + self.before_w
+        # if not np.array_equal(encode_before, encode_g, equal_nan=True):
+        #     self.before_w = w * 0.5
+        #     w = w + self.before_w
+        # else:
+        #     self.before_w *= 0.75
+        #     w = w + self.before_w
 
         if self.negative:
             new_v = (
@@ -306,10 +305,9 @@ class Particle:
         del encode_v, v_sh, v_len
         del encode_p, p_sh, p_len
         del encode_g, g_sh, g_len
-        del encode_before, before_sh, before_len
         del r_0, r_1
-    def _update_weights(self):
+    def _position_update(self):
         """
         update the weights
         """
@@ -337,8 +335,8 @@ class Particle:
         Returns:
             list: the current particle's score
         """
-        self._update_velocity(local_rate, global_rate, w)
-        self._update_weights()
+        self._velocity_calculation(local_rate, global_rate, w)
+        self._position_update()
 
         score = self.get_score(x, y, renewal)
@@ -385,8 +383,8 @@ class Particle:
         Returns:
             float: the current particle's score
         """
-        self._update_velocity_w(local_rate, global_rate, w, w_p, w_g)
-        self._update_weights()
+        self._velocity_calculation_w(local_rate, global_rate, w, w_p, w_g)
+        self._position_update()
 
         score = self.get_score(x, y, renewal)

requirements.txt

@@ -1,6 +1,8 @@
 ipython
-keras<=2.11.0
+keras
 numpy
-pandas==1.5.3
-tensorflow<=2.11.1
-tqdm==4.65.0
+pandas
+tensorflow
+tqdm
+scikit-learn
+tensorboard

setup.py

@@ -16,8 +16,11 @@ setup(
         "numpy",
         "pandas",
         "ipython",
-        "tensorflow<=2.11.1,>=2.8.0",
-        "keras<=2.11.1,>=2.8.0",
+        "matplotlib",
+        "tensorflow",
+        "keras",
+        "scikit-learn",
+        "tensorboard",
     ],
     packages=find_packages(exclude=[]),
     keywords=["pso", "tensorflow", "keras"],
@@ -33,5 +36,6 @@ setup(
         "Programming Language :: Python :: 3.8",
         "Programming Language :: Python :: 3.9",
         "Programming Language :: Python :: 3.10",
+        "Programming Language :: Python :: 3.11",
     ],
 )