mirror of https://github.com/jung-geun/PSO.git
synced 2025-12-20 04:50:45 +09:00
23-07-12
Corrected the xor and iris figures, made the particle distribution adjustable, and extracted the random seed
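This commit makes the range of the initial particle distribution configurable through the new `particle_min`/`particle_max` arguments and records the NumPy random state for reproducibility. A minimal usage sketch of the `pso.Optimizer` interface as it appears in the diffs below; the toy XOR model and data are illustrative, the save path is a placeholder, and the inline comments interpreting the coefficients are assumptions in standard PSO terms:

```python
import numpy as np
from keras.layers import Dense
from keras.models import Sequential

from pso import Optimizer

# Toy XOR data (illustrative)
x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype=float)
y = np.array([[0], [1], [1], [0]], dtype=float)

# Small illustrative model; the README's actual model is not shown in this commit
model = Sequential([Dense(8, activation="relu", input_dim=2), Dense(1, activation="sigmoid")])

pso_xor = Optimizer(
    model,
    loss="mean_squared_error",
    n_particles=50,
    c0=0.35,               # weight toward each particle's own best (assumed meaning)
    c1=0.8,                # weight toward the global best (assumed meaning)
    w_min=0.6,             # inertia weight range
    w_max=1.2,
    negative_swarm=0.1,    # fraction of particles pushed away from the global best (assumed)
    mutation_swarm=0.2,    # fraction of particles mutated per generation (assumed)
    particle_min=-3,       # new in this commit: lower bound of the initial weight distribution
    particle_max=3,        # new in this commit: upper bound
)

best_score = pso_xor.fit(
    x,
    y,
    epochs=200,
    save=True,
    save_path="./result/xor",  # illustrative path
    renewal="acc",
    check_point=25,
)
```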
.gitignore (vendored) | 4

@@ -5,12 +5,14 @@ __pycache__/
 .ipynb_checkpoints/
 
+# Files for testing
+test.ipynb
 
 # Directory for saving results
 result/
 
 # Paper-related files
 *.pdf
 *.pptx
 
 관련 논문/
 발표 자료/

.vscode/settings.json (vendored) | 3

@@ -1,5 +1,6 @@
 {
   "[python]": {
     "editor.defaultFormatter": "ms-python.black-formatter"
-  }
+  },
+  "python.formatting.provider": "none"
 }

@@ -28,7 +28,7 @@ Searching for a new training method using the PSO algorithm
 # Initial setup
 
 ```shell
-conda env create -f env.yaml
+conda env create -f ./conda_env/environment.yaml
 ```
 
 # Current progress
@@ -89,12 +89,15 @@ Optimizing the backpropagation function with the PSO algorithm
 pso_xor = Optimizer(
     model,
     loss=loss,
-    n_particles=75,
+    n_particles=50,
     c0=0.35,
     c1=0.8,
     w_min=0.6,
     w_max=1.2,
-    negative_swarm=0.25
+    negative_swarm=0.1,
+    mutation_swarm=0.2,
+    particle_min=-3,
+    particle_max=3,
 )
 
 best_score = pso_xor.fit(
@@ -106,27 +109,30 @@ Optimizing the backpropagation function with the PSO algorithm
     renewal="acc",
     empirical_balance=False,
     Dispersion=False,
-    check_point=25
+    check_point=25,
 )
 ```
 
-With the parameters above, accuracy was observed to reach 100% from about generation 40 onward
+With the parameters above, accuracy was observed to reach 100% from around generation 10
-
+
 
 2. iris problem
 
 ```python
-loss = 'categorical_crossentropy'
+loss = 'mean_squared_error'
 
 pso_iris = Optimizer(
     model,
     loss=loss,
-    n_particles=50,
+    n_particles=100,
     c0=0.4,
     c1=0.8,
     w_min=0.7,
     w_max=1.0,
-    negative_swarm=0.2
+    negative_swarm=0.1,
+    mutation_swarm=0.2,
+    particle_min=-3,
+    particle_max=3,
 )
 
 best_score = pso_iris.fit(
@@ -148,7 +154,7 @@ best_score = pso_iris.fit(
 3. mnist problem
 
 ```python
-loss = 'mse'
+loss = 'mean_squared_error'
 
 pso_mnist = Optimizer(
     model,

auto_tunning.py (deleted) | 134

@@ -1,134 +0,0 @@
-# %%
-import os
-
-os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
-
-import tensorflow as tf
-
-tf.random.set_seed(777)  # for reproducibility
-
-import gc
-from datetime import date
-
-import numpy as np
-from keras import backend as K
-from keras.datasets import mnist
-from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
-from keras.models import Sequential
-from tensorflow import keras
-from tqdm import tqdm
-
-from pso import Optimizer
-
-
-def get_data():
-    (x_train, y_train), (x_test, y_test) = mnist.load_data()
-
-    x_train, x_test = x_train / 255.0, x_test / 255.0
-    x_train = x_train.reshape((60000, 28, 28, 1))
-    x_test = x_test.reshape((10000, 28, 28, 1))
-
-    print(f"x_train : {x_train[0].shape} | y_train : {y_train[0].shape}")
-    print(f"x_test : {x_test[0].shape} | y_test : {y_test[0].shape}")
-    return x_train, y_train, x_test, y_test
-
-
-def get_data_test():
-    (x_train, y_train), (x_test, y_test) = mnist.load_data()
-    x_test = x_test.reshape((10000, 28, 28, 1))
-
-    return x_test, y_test
-
-
-def make_model():
-    model = Sequential()
-    model.add(
-        Conv2D(32, kernel_size=(5, 5), activation="relu", input_shape=(28, 28, 1))
-    )
-    model.add(MaxPooling2D(pool_size=(3, 3)))
-    model.add(Conv2D(64, kernel_size=(3, 3), activation="relu"))
-    model.add(MaxPooling2D(pool_size=(2, 2)))
-    model.add(Dropout(0.25))
-    model.add(Flatten())
-    model.add(Dense(128, activation="relu"))
-    model.add(Dense(10, activation="softmax"))
-
-    return model
-
-
-# %%
-model = make_model()
-x_test, y_test = get_data_test()
-# loss = 'binary_crossentropy'
-# loss = 'categorical_crossentropy'
-# loss = 'sparse_categorical_crossentropy'
-# loss = 'kullback_leibler_divergence'
-# loss = 'poisson'
-# loss = 'cosine_similarity'
-# loss = 'log_cosh'
-# loss = 'huber_loss'
-# loss = 'mean_absolute_error'
-# loss = 'mean_absolute_percentage_error'
-# loss = 'mean_squared_error'
-
-loss = [
-    "mse",
-    "categorical_crossentropy",
-    "binary_crossentropy",
-    "kullback_leibler_divergence",
-    "poisson",
-    "cosine_similarity",
-    "log_cosh",
-    "huber_loss",
-    "mean_absolute_error",
-    "mean_absolute_percentage_error",
-]
-n_particles = [50, 75, 100]
-c0 = [0.25, 0.35, 0.45, 0.55]
-c1 = [0.5, 0.6, 0.7, 0.8, 0.9]
-w_min = [0.5, 0.6, 0.7]
-w_max = [1.1, 1.2, 1.3]
-negative_swarm = [0.25, 0.3, 0.5]
-eb = [True, False]
-dispersion = [True, False]
-
-if __name__ == "__main__":
-    try:
-        for loss_ in loss:
-            for n in n_particles:
-                for c_0 in c0:
-                    for c_1 in c1:
-                        for w_m in w_min:
-                            for w_M in w_max:
-                                for n_s in negative_swarm:
-                                    pso_mnist = Optimizer(
-                                        model,
-                                        loss=loss_,
-                                        n_particles=n,
-                                        c0=c_0,
-                                        c1=c_1,
-                                        w_min=w_m,
-                                        w_max=w_M,
-                                        negative_swarm=n_s,
-                                    )
-
-                                    best_score = pso_mnist.fit(
-                                        x_test,
-                                        y_test,
-                                        epochs=200,
-                                        save=True,
-                                        save_path="./result/mnist",
-                                        renewal="acc",
-                                        empirical_balance=False,
-                                        Dispersion=False,
-                                        check_point=25,
-                                    )
-
-                                    del pso_mnist
-                                    gc.collect()
-                                    tf.keras.backend.clear_session()
-
-    except KeyboardInterrupt:
-        print("KeyboardInterrupt")
-    finally:
-        print("Finish")

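The deleted script above swept the PSO hyperparameters with seven nested loops. A compact, equivalent sweep can be sketched with `itertools.product`; the parameter lists are abridged from the removed script, and the actual `Optimizer`/`fit` calls are elided:

```python
from itertools import product

# Parameter grid (abridged) taken from the deleted auto_tunning.py
grid = {
    "loss": ["mse", "categorical_crossentropy", "binary_crossentropy"],
    "n_particles": [50, 75, 100],
    "c0": [0.25, 0.35, 0.45, 0.55],
    "c1": [0.5, 0.6, 0.7, 0.8, 0.9],
    "w_min": [0.5, 0.6, 0.7],
    "w_max": [1.1, 1.2, 1.3],
    "negative_swarm": [0.25, 0.3, 0.5],
}

for combo in product(*grid.values()):
    params = dict(zip(grid.keys(), combo))
    # Optimizer(model, **params) and .fit(...) would run here, as in the deleted script.
    print(params)
```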
example.py (deleted) | 104

@@ -1,104 +0,0 @@
-"""
-example.py
-
-Demonstrates usage of PSOkeras module by training dense Keras model for classifying Iris data set. Also compares
-results with a number of independent runs of standard Backpropagation algorithm (Adam) equal to the particle count.
-
-@author Mike Holcomb (mjh170630@utdallas.edu)
-"""
-
-import tensorflow as tf
-from sklearn.datasets import load_iris
-from sklearn.model_selection import train_test_split
-from tensorflow import keras
-from tensorflow.keras.layers import Dense
-from tensorflow.keras.models import Sequential
-
-from psokeras import Optimizer
-
-N = 50  # number of particles
-STEPS = 500  # number of steps
-LOSS = "mse"  # Loss function
-BATCH_SIZE = 32  # Size of batches to train on
-
-
-def build_model(loss):
-    """
-    Builds test Keras model for predicting Iris classifications
-
-    :param loss (str): Type of loss - must be one of Keras accepted keras losses
-    :return: Keras dense model of predefined structure
-    """
-    model = Sequential()
-    model.add(Dense(4, activation="sigmoid", input_dim=4, use_bias=True))
-    model.add(Dense(4, activation="sigmoid", use_bias=True))
-    model.add(Dense(3, activation="softmax", use_bias=True))
-
-    model.compile(loss=loss, optimizer="adam")
-
-    return model
-
-
-def vanilla_backpropagation(x_train, y_train):
-    """
-    Runs N number of backpropagation model training simulations
-    :param x_train: x values to train on
-    :param y_train: target labels to train with
-    :return: best model run as measured by LOSS
-    """
-    best_model = None
-    best_score = 100.0
-
-    for i in range(N):
-        model_s = build_model(LOSS)
-        model_s.fit(x_train, y_train, epochs=STEPS, batch_size=BATCH_SIZE, verbose=0)
-        train_score = model_s.evaluate(
-            x_train, y_train, batch_size=BATCH_SIZE, verbose=0
-        )
-        if train_score < best_score:
-            best_model = model_s
-            best_score = train_score
-    return best_model
-
-
-if __name__ == "__main__":
-    # Section I: Build the data set
-    iris = load_iris()
-    x_train, x_test, y_train, y_test = train_test_split(
-        iris.data,
-        keras.utils.to_categorical(iris.target, num_classes=None),
-        test_size=0.5,
-        random_state=0,
-        stratify=iris.target,
-    )
-
-    # Section II: First run the backpropagation simulation
-    model_s = vanilla_backpropagation(x_train=x_train, y_train=y_train)
-
-    b_train_score = model_s.evaluate(x_train, y_train, batch_size=BATCH_SIZE, verbose=0)
-    b_test_score = model_s.evaluate(x_test, y_test, batch_size=BATCH_SIZE, verbose=0)
-    print("Backprop -- train: {:.4f} test: {:.4f}".format(b_train_score, b_test_score))
-
-    # Section III: Then run the particle swarm optimization
-    # First build model to train on (primarily used for structure, also included in swarm)
-    model_p = build_model(LOSS)
-
-    # Instantiate optimizer with model, loss function, and hyperparameters
-    pso = Optimizer(
-        model=model_p,
-        loss=LOSS,
-        n=N,  # Number of particles
-        acceleration=1.0,  # Contribution of recursive particle velocity (acceleration)
-        local_rate=0.6,  # Contribution of locally best weights to new velocity
-        global_rate=0.4,  # Contribution of globally best weights to new velocity
-    )
-
-    # Train model on provided data
-    pso.fit(x_train, y_train, steps=STEPS, batch_size=BATCH_SIZE)
-
-    # Get a copy of the model with the globally best weights
-    model_p = pso.get_best_model()
-
-    p_train_score = model_p.evaluate(x_train, y_train, batch_size=BATCH_SIZE, verbose=0)
-    p_test_score = model_p.evaluate(x_test, y_test, batch_size=BATCH_SIZE, verbose=0)
-    print("PSO -- train: {:.4f} test: {:.4f}".format(p_train_score, p_test_score))

(2 modified images, 52 KiB and 47 KiB; sizes unchanged before and after)
history_plt/xor_2_10.png (new binary file) | 142 KiB
history_plt/xor_2_15.png (new binary file) | 273 KiB

iris.py | 9

@@ -1,4 +1,5 @@
 import os
+import sys
 
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 
@@ -39,11 +40,11 @@ def load_data():
 model = make_model()
 x_train, x_test, y_train, y_test = load_data()
 
-loss = ["categorical_crossentropy"]
+loss = ["categorical_crossentropy", 'mean_squared_error']
 
 pso_iris = Optimizer(
     model,
-    loss=loss[0],
+    loss=loss[1],
     n_particles=100,
     c0=0.4,
     c1=0.8,
@@ -51,6 +52,8 @@ pso_iris = Optimizer(
     w_max=1.0,
     negative_swarm=0.1,
     mutation_swarm=0.2,
+    particle_min=-3,
+    particle_max=3,
 )
 
 best_score = pso_iris.fit(
@@ -66,3 +69,5 @@ best_score = pso_iris.fit(
 )
 
 gc.collect()
+print("Done!")
+sys.exit(0)

mnist.py | 15

@@ -1,20 +1,19 @@
 # %%
 import os
+import sys
 
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 
 import gc
 
 import tensorflow as tf
-from tensorflow import keras
 from keras.datasets import mnist
 from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
 from keras.models import Sequential
+from tensorflow import keras
 
 from pso import Optimizer
 
-# from pso import Optimizer_Test
-
 
 def get_data():
     (x_train, y_train), (x_test, y_test) = mnist.load_data()
@@ -85,11 +84,11 @@ if __name__ == "__main__":
     pso_mnist = Optimizer(
         model,
         loss=loss[0],
-        n_particles=75,
+        n_particles=70,
         c0=0.25,
-        c1=0.4,
-        w_min=0.2,
-        w_max=0.55,
+        c1=0.45,
+        w_min=0.35,
+        w_max=0.6,
         negative_swarm=0.1,
         mutation_swarm=0.2,
     )
@@ -110,3 +109,5 @@ if __name__ == "__main__":
         print(e)
     finally:
         gc.collect()
+        print("Done!")
+        sys.exit(0)

@@ -1,8 +1,11 @@
 from .optimizer import Optimizer
 from .particle import Particle
-from .optimizer_target import Optimizer_Target
+# from .optimizer_target import Optimizer_Target
+
+__version__ = '0.1.0'
+
 __all__ = [
     'Optimizer',
     'Particle',
-    'Optimizer_Target'
+    # 'Optimizer_Target'
 ]

@@ -41,6 +41,8 @@ class Optimizer:
         mutation_swarm: float = 0,
         np_seed: int = None,
         tf_seed: int = None,
+        particle_min: float = -5,
+        particle_max: float = 5,
     ):
         """
         particle swarm optimization
@@ -63,6 +65,8 @@ class Optimizer:
         if tf_seed is not None:
             tf.random.set_seed(tf_seed)
 
+        self.random_state = np.random.get_state()
+
         self.model = model  # model structure
         self.loss = loss  # loss function
         self.n_particles = n_particles  # number of particles
@@ -82,6 +86,7 @@ class Optimizer:
         self.renewal = "acc"
         self.Dispersion = False
         self.day = datetime.now().strftime("%m-%d-%H-%M")
+        self.empirical_balance = False
 
         negative_count = 0
 
@@ -89,7 +94,7 @@ class Optimizer:
            m = keras.models.model_from_json(model.to_json())
            init_weights = m.get_weights()
            w_, sh_, len_ = self._encode(init_weights)
-           w_ = np.random.uniform(-3, 3, len(w_))
+           w_ = np.random.uniform(particle_min, particle_max, len(w_))
            m.set_weights(self._decode(w_, sh_, len_))
            m.compile(loss=self.loss, optimizer="sgd", metrics=["accuracy"])
            self.particles[i] = Particle(
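The hunk above replaces the hard-coded ±3 initialization with the new `particle_min`/`particle_max` bounds: each particle's Keras weights are flattened into one vector, sampled uniformly inside that range, and written back to the model. The actual `_encode`/`_decode` helpers are not shown in this diff, so the round trip below is only a sketch of the assumed behavior:

```python
import numpy as np

def encode(weights):
    # Flatten a list of Keras weight arrays into one vector, remembering shapes and lengths.
    shapes = [w.shape for w in weights]
    lengths = [w.size for w in weights]
    flat = np.concatenate([w.ravel() for w in weights])
    return flat, shapes, lengths

def decode(flat, shapes, lengths):
    # Rebuild the list of weight arrays from the flat vector.
    out, start = [], 0
    for shape, length in zip(shapes, lengths):
        out.append(flat[start:start + length].reshape(shape))
        start += length
    return out

# Initialization as in the hunk above: every weight drawn uniformly from [particle_min, particle_max].
particle_min, particle_max = -3, 3
weights = [np.zeros((4, 4)), np.zeros(4)]   # stand-in for m.get_weights()
flat, shapes, lengths = encode(weights)
flat = np.random.uniform(particle_min, particle_max, len(flat))
weights = decode(flat, shapes, lengths)
```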
@@ -254,6 +259,7 @@ class Optimizer:
             elif renewal == "both":
                 if local_score[1] > self.g_best_score[0]:
                     self.g_best_score[0] = local_score[1]
+                    self.g_best_score[1] = local_score[0]
                     self.g_best = p.get_best_weights()
                     self.g_best_ = p.get_best_weights()
 
@@ -274,7 +280,6 @@ class Optimizer:
                 else:
                     f.write("\n")
 
-            f.close()
             del local_score
             gc.collect()
 
@@ -424,7 +429,6 @@ class Optimizer:
                         f.write(", ")
                     else:
                         f.write("\n")
-            f.close()
 
             if check_point is not None:
                 if epoch % check_point == 0:
@@ -498,6 +502,11 @@ class Optimizer:
             "Dispersion": self.Dispersion,
             "negative_swarm": self.negative_swarm,
             "mutation_swarm": self.mutation_swarm,
+            "random_state_0": self.random_state[0],
+            "random_state_1": self.random_state[1].tolist(),
+            "random_state_2": self.random_state[2],
+            "random_state_3": self.random_state[3],
+            "random_state_4": self.random_state[4],
             "renewal": self.renewal,
         }
 
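The hunk above stores the tuple returned by `np.random.get_state()` (captured in `__init__` as `self.random_state`) in the run's JSON metadata; element 1 is the array of 624 Mersenne Twister keys, hence the `.tolist()`. A sketch of saving and later restoring that state, with an illustrative file name:

```python
import json
import numpy as np

state = np.random.get_state()  # ('MT19937', keys array, pos, has_gauss, cached_gaussian)

meta = {
    "random_state_0": state[0],
    "random_state_1": state[1].tolist(),  # 624 uint32 keys -> JSON-serializable list
    "random_state_2": state[2],
    "random_state_3": state[3],
    "random_state_4": state[4],
}

with open("run_meta.json", "w") as f:   # illustrative path
    json.dump(meta, f, indent=4)

# Restoring the exact generator state later:
with open("run_meta.json") as f:
    meta = json.load(f)
np.random.set_state((
    meta["random_state_0"],
    np.array(meta["random_state_1"], dtype=np.uint32),
    meta["random_state_2"],
    meta["random_state_3"],
    meta["random_state_4"],
))
```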
@@ -507,8 +516,6 @@ class Optimizer:
         ) as f:
             json.dump(json_save, f, indent=4)
 
-            f.close()
-
     def _check_point_save(self, save_path: str = f"./result/check_point"):
         """
         intermediate save

requirements.txt (new file) | 7

@@ -0,0 +1,7 @@
+ipython @ file:///home/conda/feedstock_root/build_artifacts/ipython_1680185408135/work
+keras==2.11.0
+matplotlib @ file:///croot/matplotlib-suite_1679593461707/work
+numpy @ file:///work/mkl/numpy_and_numpy_base_1682953417311/work
+pandas==1.5.3
+tensorflow==2.11.0
+tqdm @ file:///croot/tqdm_1679561862951/work

setup.py (new file) | 17

@@ -0,0 +1,17 @@
+from setuptools import setup, find_packages
+
+setup(
+    name='pso-keras',
+    version='0.1.0',
+    description='Particle Swarm Optimization to tensorflow package',
+    author='pieroot',
+    author_email='jgbong0306@gmail.com',
+    url='https://github.com/jung-geun/PSO',
+    install_requires=['tqdm', 'numpy', 'tensorflow', 'keras'],
+    packages=find_packages(exclude=[]),
+    keywords=['pso', 'tensorflow', 'keras'],
+    python_requires='>=3.8',
+    package_data={},
+    zip_safe=False,
+    long_description=open('README.md', encoding='UTF8').read(),
+)
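With the new setup.py and the `__version__` attribute added to `pso/__init__.py`, an installed copy of the package can be sanity-checked directly (assuming it has been installed into the active environment, for example with pip):

```python
import pso

print(pso.__version__)  # '0.1.0', set in pso/__init__.py in this commit
print(pso.__all__)      # ['Optimizer', 'Particle']
```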
weights.h5 (binary file, not shown) | BIN

xor.py | 11

@@ -1,5 +1,6 @@
 # %%
 import os
+import sys
 
 os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 
@@ -49,13 +50,15 @@ loss = [
 pso_xor = Optimizer(
     model,
     loss=loss[0],
-    n_particles=75,
+    n_particles=50,
     c0=0.35,
     c1=0.8,
     w_min=0.6,
     w_max=1.2,
-    negative_swarm=0.25,
-    mutation_swarm=0.25,
+    negative_swarm=0.1,
+    mutation_swarm=0.2,
+    particle_min=-3,
+    particle_max=3,
 )
 best_score = pso_xor.fit(
     x_test,
@@ -69,4 +72,6 @@ best_score = pso_xor.fit(
     check_point=25,
 )
 
+print("Done!")
+sys.exit(0)
 # %%