Mirror of https://github.com/jung-geun/PSO.git (synced 2025-12-20 04:50:45 +09:00)

23-07-07
Dev container setup: tqdm + tensorflow are installed automatically, and the conda env is auto-created with name = pso.

.devcontainer/devcontainer.json
@@ -2,14 +2,20 @@
 // README at: https://github.com/devcontainers/templates/tree/main/src/miniconda
 {
   "name": "Miniconda (Python 3)",
+  // Configure tool-specific properties.
   "customizations": {
     "vscode": {
       "extensions": [
         "ms-python.python",
-        "ms-toolsai.jupyter"
+        "ms-toolsai.jupyter",
+        "donjayamanne.python-extension-pack",
+        "tht13.python",
+        "esbenp.prettier-vscode",
+        "ms-python.black-formatter"
       ]
     }
   },
+  // Features to add to the dev container. More info: https://containers.dev/features.
   "features": {
     "ghcr.io/devcontainers/features/nvidia-cuda:1": {
       "installCudnn": true
@@ -22,15 +28,13 @@
       "version": "3.9"
     }
   },
-  "postCreateCommand": "conda env create --file environment.yaml --name pso"
-  // Features to add to the dev container. More info: https://containers.dev/features.
-  // "features": {},
   // Use 'forwardPorts' to make a list of ports inside the container available locally.
   // "forwardPorts": [],
   // Use 'postCreateCommand' to run commands after the container is created.
-  // "postCreateCommand": "python --version",
-  // Configure tool-specific properties.
-  // "customizations": {},
+  "postCreateCommand": [
+    "conda env create --file environment.yaml --name pso",
+    "conda activate pso"
+  ]
   // Uncomment to connect as root instead. More info: https://aka.ms/dev-containers-non-root.
   // "remoteUser": "root"
 }
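After a rebuild, the postCreateCommand above creates the pso conda env from environment.yaml. A minimal sanity check, assuming the packages named in the commit message and the versions pinned in the environment.yaml hunk further down; this snippet is illustrative and not part of the commit:

    # Hypothetical check inside the auto-created "pso" env (not part of this commit).
    import tensorflow as tf
    from tqdm import tqdm

    print(tf.__version__)  # environment.yaml pins tensorflow==2.12.0
    print(tf.config.list_physical_devices("GPU"))  # CUDA/cuDNN come from the nvidia-cuda feature

    for _ in tqdm(range(3)):  # tqdm is pinned at 4.65.0 via conda
        pass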

.github/workflows/python-package-conda.yml (vendored) | 24
@@ -1,24 +0,0 @@
-name: Python Package using Conda
-
-on: [push]
-
-jobs:
-  build-linux:
-    runs-on: ubuntu-latest
-    strategy:
-      max-parallel: 5
-
-    steps:
-      - uses: actions/checkout@v3
-      - name: Set up Python 3.9
-        uses: actions/setup-python@v3
-        with:
-          python-version: "3.9"
-      - name: Add conda to system path
-        run: |
-          # $CONDA is an environment variable pointing to the root of the miniconda directory
-          echo $CONDA/bin >> $GITHUB_PATH
-      - name: Install dependencies
-        run: |
-          conda env create --file environment.yaml --name pso
-          conda activate pso
@@ -1,30 +1,25 @@
 # %%
 import os
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 
 import tensorflow as tf
 
 tf.random.set_seed(777)  # for reproducibility
 
-from tensorflow import keras
-from keras.datasets import mnist
-from keras.models import Sequential
-from keras.layers import Dense, Dropout, Flatten
-from keras.layers import Conv2D, MaxPooling2D
-from keras import backend as K
+import gc
+from datetime import date
 
-# from pso_tf import PSO
-from pso import Optimizer
-
 import numpy as np
-from datetime import date
+from keras import backend as K
+from keras.datasets import mnist
+from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
+from keras.models import Sequential
+from tensorflow import keras
 from tqdm import tqdm
 
-import gc
+from pso import Optimizer
 
-# print(tf.__version__)
-# print(tf.config.list_physical_devices())
-# print(f"Num GPUs Available: {len(tf.config.list_physical_devices('GPU'))}")
 
 def get_data():
     (x_train, y_train), (x_test, y_test) = mnist.load_data()
@@ -37,26 +32,30 @@ def get_data():
     print(f"x_test : {x_test[0].shape} | y_test : {y_test[0].shape}")
     return x_train, y_train, x_test, y_test
 
 
 def get_data_test():
     (x_train, y_train), (x_test, y_test) = mnist.load_data()
     x_test = x_test.reshape((10000, 28, 28, 1))
 
     return x_test, y_test
 
 
 def make_model():
     model = Sequential()
-    model.add(Conv2D(32, kernel_size=(5, 5),
-              activation='relu', input_shape=(28, 28, 1)))
+    model.add(
+        Conv2D(32, kernel_size=(5, 5), activation="relu", input_shape=(28, 28, 1))
+    )
     model.add(MaxPooling2D(pool_size=(3, 3)))
-    model.add(Conv2D(64, kernel_size=(3, 3), activation='relu'))
+    model.add(Conv2D(64, kernel_size=(3, 3), activation="relu"))
     model.add(MaxPooling2D(pool_size=(2, 2)))
     model.add(Dropout(0.25))
     model.add(Flatten())
-    model.add(Dense(128, activation='relu'))
-    model.add(Dense(10, activation='softmax'))
+    model.add(Dense(128, activation="relu"))
+    model.add(Dense(10, activation="softmax"))
 
     return model
 
 
 # %%
 model = make_model()
 x_test, y_test = get_data_test()
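For reference, make_model above yields the following output shapes with Keras' default "valid" padding; a quick trace one can confirm against model.summary():

    # Shape trace for make_model() (28x28x1 MNIST input, "valid" padding):
    #   Conv2D 32 @ 5x5 : 28 - 5 + 1 = 24 -> (24, 24, 32)
    #   MaxPooling 3x3  : 24 // 3    = 8  -> (8, 8, 32)
    #   Conv2D 64 @ 3x3 : 8 - 3 + 1  = 6  -> (6, 6, 64)
    #   MaxPooling 2x2  : 6 // 2     = 3  -> (3, 3, 64)
    #   Flatten         : 3 * 3 * 64 = 576
    #   Dense 128 (relu) -> Dense 10 (softmax)
    model = make_model()
    model.summary()  # should report the shapes above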
@@ -72,7 +71,18 @@ x_test, y_test = get_data_test()
 # loss = 'mean_absolute_percentage_error'
 # loss = 'mean_squared_error'
 
-loss = ['mse', 'categorical_crossentropy', 'binary_crossentropy', 'kullback_leibler_divergence', 'poisson', 'cosine_similarity', 'log_cosh', 'huber_loss', 'mean_absolute_error', 'mean_absolute_percentage_error']
+loss = [
+    "mse",
+    "categorical_crossentropy",
+    "binary_crossentropy",
+    "kullback_leibler_divergence",
+    "poisson",
+    "cosine_similarity",
+    "log_cosh",
+    "huber_loss",
+    "mean_absolute_error",
+    "mean_absolute_percentage_error",
+]
 n_particles = [50, 75, 100]
 c0 = [0.25, 0.35, 0.45, 0.55]
 c1 = [0.5, 0.6, 0.7, 0.8, 0.9]
@@ -99,8 +109,8 @@ if __name__ == "__main__":
             c1=c_1,
             w_min=w_m,
             w_max=w_M,
-            negative_swarm=n_s
+            negative_swarm=n_s,
         )
 
         best_score = pso_mnist.fit(
             x_test,
@@ -111,8 +121,8 @@ if __name__ == "__main__":
             renewal="acc",
             empirical_balance=False,
             Dispersion=False,
-            check_point=25
+            check_point=25,
         )
 
         del pso_mnist
         gc.collect()
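The lists above (loss, n_particles, c0, c1) together with the keyword arguments in these hunks suggest a hyperparameter sweep, but the loop itself falls outside the diff. A sketch of how such a sweep could be wired up, assuming itertools.product; loss_fn, n_p, and the literal w_min/w_max/negative_swarm values are placeholders, not the file's actual code:

    import itertools

    for loss_fn, n_p, c_0, c_1 in itertools.product(loss, n_particles, c0, c1):
        pso_mnist = Optimizer(
            model,
            loss=loss_fn,        # assumed keywords; only c1, w_min, w_max and
            n_particles=n_p,     # negative_swarm appear verbatim in the hunks
            c0=c_0,
            c1=c_1,
            w_min=0.4,
            w_max=0.9,
            negative_swarm=0.1,
        )
        best_score = pso_mnist.fit(
            x_test,
            y_test,              # the fit arguments between x_test and
            epochs=100,          # renewal="acc" are elided by the diff; assumed
            renewal="acc",
            empirical_balance=False,
            Dispersion=False,
            check_point=25,
        )
        del pso_mnist
        gc.collect()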

environment.yaml

@@ -10,8 +10,8 @@ dependencies:
   - pandas=1.5.3=py39h417a72b_0
   - pip=23.0.1=py39h06a4308_0
   - python=3.9.16=h7a1cb2a_2
+  - tqdm=4.65.0=py39hb070fc8_0
   - pip:
       - numpy==1.23.5
       - nvidia-cudnn-cu11==8.6.0.163
       - tensorflow==2.12.0
-      - tqdm==4.65.1.dev3+g5587f0d

example.py | 57
@@ -7,19 +7,19 @@ results with a number of independent runs of standard Backpropagation algorithm
 @author Mike Holcomb (mjh170630@utdallas.edu)
 """
 
+import tensorflow as tf
 from sklearn.datasets import load_iris
 from sklearn.model_selection import train_test_split
-import tensorflow as tf
 from tensorflow import keras
-from tensorflow.keras.models import Sequential
 from tensorflow.keras.layers import Dense
+from tensorflow.keras.models import Sequential
 
 from psokeras import Optimizer
 
 N = 50  # number of particles
 STEPS = 500  # number of steps
-LOSS = 'mse'  # Loss function
+LOSS = "mse"  # Loss function
 BATCH_SIZE = 32  # Size of batches to train on
 
 
 def build_model(loss):
@@ -30,12 +30,11 @@ def build_model(loss):
     :return: Keras dense model of predefined structure
     """
     model = Sequential()
-    model.add(Dense(4, activation='sigmoid', input_dim=4, use_bias=True))
-    model.add(Dense(4, activation='sigmoid', use_bias=True))
-    model.add(Dense(3, activation='softmax', use_bias=True))
+    model.add(Dense(4, activation="sigmoid", input_dim=4, use_bias=True))
+    model.add(Dense(4, activation="sigmoid", use_bias=True))
+    model.add(Dense(3, activation="softmax", use_bias=True))
 
-    model.compile(loss=loss,
-                  optimizer='adam')
+    model.compile(loss=loss, optimizer="adam")
 
     return model
 
@@ -52,11 +51,10 @@ def vanilla_backpropagation(x_train, y_train):
 
     for i in range(N):
         model_s = build_model(LOSS)
-        model_s.fit(x_train, y_train,
-                    epochs=STEPS,
-                    batch_size=BATCH_SIZE,
-                    verbose=0)
-        train_score = model_s.evaluate(x_train, y_train, batch_size=BATCH_SIZE, verbose=0)
+        model_s.fit(x_train, y_train, epochs=STEPS, batch_size=BATCH_SIZE, verbose=0)
+        train_score = model_s.evaluate(
+            x_train, y_train, batch_size=BATCH_SIZE, verbose=0
+        )
         if train_score < best_score:
             best_model = model_s
             best_score = train_score
@@ -66,11 +64,13 @@ def vanilla_backpropagation(x_train, y_train):
 if __name__ == "__main__":
     # Section I: Build the data set
     iris = load_iris()
-    x_train, x_test, y_train, y_test = train_test_split(iris.data,
-                                                        keras.utils.to_categorical(iris.target, num_classes=None),
-                                                        test_size=0.5,
-                                                        random_state=0,
-                                                        stratify=iris.target)
+    x_train, x_test, y_train, y_test = train_test_split(
+        iris.data,
+        keras.utils.to_categorical(iris.target, num_classes=None),
+        test_size=0.5,
+        random_state=0,
+        stratify=iris.target,
+    )
 
     # Section II: First run the backpropagation simulation
     model_s = vanilla_backpropagation(x_train=x_train, y_train=y_train)
@@ -84,13 +84,14 @@ if __name__ == "__main__":
     model_p = build_model(LOSS)
 
     # Instantiate optimizer with model, loss function, and hyperparameters
-    pso = Optimizer(model=model_p,
-                    loss=LOSS,
-                    n=N,  # Number of particles
-                    acceleration=1.0,  # Contribution of recursive particle velocity (acceleration)
-                    local_rate=0.6,  # Contribution of locally best weights to new velocity
-                    global_rate=0.4  # Contribution of globally best weights to new velocity
-                    )
+    pso = Optimizer(
+        model=model_p,
+        loss=LOSS,
+        n=N,  # Number of particles
+        acceleration=1.0,  # Contribution of recursive particle velocity (acceleration)
+        local_rate=0.6,  # Contribution of locally best weights to new velocity
+        global_rate=0.4,  # Contribution of globally best weights to new velocity
+    )
 
     # Train model on provided data
     pso.fit(x_train, y_train, steps=STEPS, batch_size=BATCH_SIZE)
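The comments above map onto the standard PSO velocity update, v <- acceleration * v + local_rate * r1 * (p_best - w) + global_rate * r2 * (g_best - w). The psokeras internals are not shown in this diff, so the following numpy sketch only illustrates one step of that textbook update, not the library's code:

    import numpy as np

    rng = np.random.default_rng(0)

    def pso_step(w, v, p_best, g_best,
                 acceleration=1.0, local_rate=0.6, global_rate=0.4):
        """One textbook PSO update on a flat weight vector (illustration only)."""
        v_new = (
            acceleration * v
            + local_rate * rng.random(w.shape) * (p_best - w)
            + global_rate * rng.random(w.shape) * (g_best - w)
        )
        return w + v_new, v_new

    w, v = np.zeros(4), np.zeros(4)
    w, v = pso_step(w, v, p_best=np.ones(4), g_best=np.full(4, 0.5))
    print(w)  # the particle has moved toward its local and global bests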

iris.py | 5
@@ -4,15 +4,14 @@ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 
 import gc
 
-import numpy as np
-import tensorflow as tf
-from pso import Optimizer
 from sklearn.datasets import load_iris
 from sklearn.model_selection import train_test_split
 from tensorflow import keras
 from tensorflow.keras import layers
 from tensorflow.keras.models import Sequential
 
+from pso import Optimizer
+
 
 def make_model():
     model = Sequential()

iris_tf.py | 26
@@ -1,5 +1,6 @@
 import os
-os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
+
+os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 
 import tensorflow as tf
 
@@ -11,22 +12,22 @@ if gpus:
     except RuntimeError as e:
         print(e)
 
+from sklearn.datasets import load_iris
+from sklearn.model_selection import train_test_split
 from tensorflow import keras
 from tensorflow.keras import layers
 from tensorflow.keras.models import Sequential
 
-from sklearn.datasets import load_iris
-from sklearn.model_selection import train_test_split
-
 
 def make_model():
     model = Sequential()
-    model.add(layers.Dense(10, activation='relu', input_shape=(4,)))
-    model.add(layers.Dense(10, activation='relu'))
-    model.add(layers.Dense(3, activation='softmax'))
+    model.add(layers.Dense(10, activation="relu", input_shape=(4,)))
+    model.add(layers.Dense(10, activation="relu"))
+    model.add(layers.Dense(3, activation="softmax"))
 
     return model
 
 
 def load_data():
     iris = load_iris()
     x = iris.data
@@ -34,18 +35,21 @@ def load_data():
 
     y = keras.utils.to_categorical(y, 3)
 
-    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, shuffle=True, stratify=y)
+    x_train, x_test, y_train, y_test = train_test_split(
+        x, y, test_size=0.2, shuffle=True, stratify=y
+    )
 
     return x_train, x_test, y_train, y_test
 
 
 if __name__ == "__main__":
     model = make_model()
     x_train, x_test, y_train, y_test = load_data()
     print(x_train.shape, y_train.shape)
 
-    loss = ['categorical_crossentropy', 'accuracy','mse']
-    metrics = ['accuracy']
+    loss = ["categorical_crossentropy", "accuracy", "mse"]
+    metrics = ["accuracy"]
 
-    model.compile(optimizer='sgd', loss=loss[0], metrics=metrics[0])
+    model.compile(optimizer="sgd", loss=loss[0], metrics=metrics[0])
     model.fit(x_train, y_train, epochs=200, batch_size=32, validation_split=0.2)
     model.evaluate(x_test, y_test, batch_size=32)

mnist.py | 7
@@ -5,14 +5,11 @@ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 
 import gc
 
-import tensorflow as tf
-from keras import backend as K
 from keras.datasets import mnist
 from keras.layers import Conv2D, Dense, Dropout, Flatten, MaxPooling2D
 from keras.models import Sequential
 
 from pso import Optimizer
-from tensorflow import keras
-from tqdm import tqdm
 
 
 def get_data():
@@ -24,6 +21,7 @@ def get_data():
 
     print(f"x_train : {x_train[0].shape} | y_train : {y_train[0].shape}")
     print(f"x_test : {x_test[0].shape} | y_test : {y_test[0].shape}")
+
     return x_train, y_train, x_test, y_test
 
 
@@ -92,6 +90,7 @@ if __name__ == "__main__":
             Dispersion=False,
             check_point=25,
         )
+
     except Exception as e:
         print(e)
     finally:
@@ -73,7 +73,11 @@ class Optimizer:
         self.g_best = None  # weights with the best score
         self.g_best_ = None  # weights with the best score - variable for dispersing values
         self.avg_score = 0  # average score
 
         self.save_path = None  # save location
+        self.renewal = "acc"
+        self.Dispersion = False
+        self.day = datetime.now().strftime("%m-%d-%H-%M")
 
         negative_count = 0
 
@@ -225,7 +229,6 @@ class Optimizer:
             self.save_path = save_path
             if not os.path.exists(save_path):
                 os.makedirs(save_path, exist_ok=True)
-            self.day = datetime.now().strftime("%m-%d-%H-%M")
         except ValueError as e:
             print(e)
             sys.exit(1)
@@ -1,7 +1,6 @@
 import gc
 
 import numpy as np
-import tensorflow as tf
 from tensorflow import keras
 
 

xor.py | 16
@@ -5,15 +5,12 @@ os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2"
 
 import numpy as np
 import tensorflow as tf
 
-# from pso_tf import PSO
-from pso import Optimizer
 from tensorflow import keras
 from tensorflow.keras import layers
+from tensorflow.keras.layers import Dense
 from tensorflow.keras.models import Sequential
 
-print(tf.__version__)
-print(tf.config.list_physical_devices())
+from pso import Optimizer
 
 
 def get_data():
@@ -23,12 +20,9 @@ def get_data():
 
 
 def make_model():
-    leyer = []
-    leyer.append(layers.Dense(2, activation="sigmoid", input_shape=(2,)))
-    # leyer.append(layers.Dense(2, activation='sigmoid'))
-    leyer.append(layers.Dense(1, activation="sigmoid"))
-
-    model = Sequential(leyer)
+    model = Sequential()
+    model.add(layers.Dense(2, activation="sigmoid", input_shape=(2,)))
+    model.add(layers.Dense(1, activation="sigmoid"))
 
     return model
 
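The body of get_data() lies outside the hunk; for xor.py it presumably returns the four-row XOR truth table. A small usage sketch of the refactored make_model on that data, trained here with plain gradient descent for illustration (the file itself drives training through pso.Optimizer):

    import numpy as np

    # Canonical XOR truth table that get_data() presumably returns.
    x = np.array([[0, 0], [0, 1], [1, 0], [1, 1]], dtype="float32")
    y = np.array([[0], [1], [1], [0]], dtype="float32")

    model = make_model()
    model.compile(optimizer="adam", loss="binary_crossentropy", metrics=["accuracy"])
    model.fit(x, y, epochs=2000, verbose=0)  # tiny net; may need more epochs depending on init
    print(model.predict(x).round().ravel())  # expect [0. 1. 1. 0.]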