[Bitcoin] Mining

from hashlib import sha256
text = "ABC"
print(sha256(text.encode('ascii')).hexdigest())

$ python3 main.py
b5d4045c3f466fa91fe2cc6abe79232a1a57cdf104f7a26e716e0a1e2789df78
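Mining relies on the avalanche property of SHA-256: change the input even slightly and the digest comes out completely unrelated, so the only way to find a digest with a chosen prefix is brute force. A quick illustration (my own addition, not part of the original script):

from hashlib import sha256

# One changed character yields a totally different digest (avalanche effect).
for s in ("ABC", "ABD"):
    print(s, sha256(s.encode('ascii')).hexdigest())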

from hashlib import sha256

MAX_NONCE = 10000000000

def SHA256(text):
    return sha256(text.encode('ascii')).hexdigest()

def mine(block_number, transactions, previous_hash, prefix_zeros):
    # Proof of work: find a nonce whose hash starts with prefix_zeros zeros.
    prefix_str = '0' * prefix_zeros
    for nonce in range(MAX_NONCE):
        text = str(block_number) + transactions + previous_hash + str(nonce)
        new_hash = SHA256(text)
        if new_hash.startswith(prefix_str):
            print(f"Yay! Successfully mined bitcoins with nonce value: {nonce}")
            return new_hash

    raise RuntimeError(f"Couldn't find a correct hash after trying {MAX_NONCE} times")


if __name__ == '__main__':
    transactions = '''
    Bob->Alice->20,
    Ken->Ryu->45
    '''
    difficulty = 6
    new_hash = mine(5, transactions, '00000000000008a3a41b85b8b29ad444def299fee21793cd8b9e567eab02cd81', difficulty)
    print(new_hash)
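Each additional leading zero multiplies the expected work by 16, since each hex digit of the digest is effectively uniform. A back-of-the-envelope sketch (my own addition):

# Expected number of nonce attempts is roughly 16**difficulty,
# because each of the 16 hex digits is equally likely.
for difficulty in range(1, 8):
    print(difficulty, 16 ** difficulty)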

Bitcoin mining hardware
https://www.buybitcoinworldwide.com/mining/hardware/

Bitcoin explorer
https://www.blockchain.com/explorer
https://www.blockchain.com/btc/blocks?page=1

Bitcoin block 1
https://www.blockchain.com/btc/block/1

I see. Now I'd like to know what processing happens before and after this.

Reading passport text with tesseract

try:
    from PIL import Image
except ImportError:
    import Image
import pytesseract


FILENAME = 'test.jpg'

# Keep only the MRZ (machine-readable zone) lines, which contain '<<<'.
text = pytesseract.image_to_string(Image.open(FILENAME))
lines = text.split("\n")
for line in lines:
    if '<<<' in line:
        t = line.replace(' ', '')
        print(t)
test1.png
P<GBRUK<SPECIMEN<<ANGELA<ZOE<<<<<<<<<<<<<<<<
5334755143GBR8812049F2509286<<<<<<<<<<c<<<04

test2.png
PDCYPPOLITIS<<ZINONAS<<<<<<<<<<<<<<KKKKKKKKK
FOOOOOD005CYP8012148M3006151<<<<<<<<<<<<<<02

test3.png
PTNOROESTENBYEN<<AASAMUND<SPECIMEN<<<<<<<<<<
FHCO023539NOR5604230M2506126<<<<<<<<<<<<<<00

For some reason, strange characters get mixed in.

def convert(filename):
    # Bleach the background: any pixel with at least one bright channel
    # becomes pure white, leaving only the dark glyphs for tesseract.
    img = Image.open(filename)
    img = img.convert('RGB')
    size = img.size
    img2 = Image.new('RGB', size)

    border = 110

    for x in range(size[0]):
        for y in range(size[1]):
            r, g, b = img.getpixel((x, y))
            if r > border or g > border or b > border:
                r = 255
                g = 255
                b = 255
            img2.putpixel((x, y), (r, g, b))

    return img2

text = pytesseract.image_to_string(convert('test.jpg'))
lines = text.split("\n")
for line in lines:
    if '<<<' in line:
        print(line)

Hmm, still not quite right.
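One more angle worth trying, sketched below: the MRZ alphabet is only A-Z, 0-9, and '<', so restricting tesseract to that character set via its whitelist option should at least rule out lowercase junk like the stray 'c' above. Untested on these particular scans, so treat it as an assumption:

import pytesseract
from PIL import Image

# MRZ lines use only uppercase letters, digits, and the '<' filler.
MRZ_CONFIG = r'--psm 6 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789<'

text = pytesseract.image_to_string(Image.open('test.jpg'), config=MRZ_CONFIG)
for line in text.split("\n"):
    if '<<<' in line:
        print(line.replace(' ', ''))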

Setting up Let's Encrypt on a VPS

SSL certificates can't be issued for access via a bare IP address, so you need to obtain and configure your own domain in advance.

$ sudo a2enmod ssl
$ sudo a2ensite default-ssl
$ sudo service apache2 reload

$ sudo vi /etc/apache2/sites-available/virtual.host.conf

<VirtualHost *:80>
    DocumentRoot /var/www/node
    ServerName hoge.site
    #ServerAlias www.hoge.site
    # Other directives here

    RewriteEngine on
    RewriteCond %{SERVER_NAME} =hoge.site [OR]
    RewriteCond %{SERVER_NAME} =www.hoge.site
    RewriteRule ^ https://%{SERVER_NAME}%{REQUEST_URI} [END,NE,R=permanent]
</VirtualHost>

<VirtualHost _default_:443>
    DocumentRoot /var/www/node
    ServerName hoge.site
    ServerAlias www.hoge.site
    # Other directives here

    SSLCertificateFile /etc/letsencrypt/live/hoge.site/fullchain.pem
    SSLCertificateKeyFile /etc/letsencrypt/live/hoge.site/privkey.pem
    Include /etc/letsencrypt/options-ssl-apache.conf
</VirtualHost>

$ sudo apt install firewalld
$ sudo firewall-cmd --add-service=http --zone=public --permanent
$ sudo firewall-cmd --add-service=https --zone=public --permanent
$ sudo systemctl restart firewalld
$ sudo firewall-cmd --list-all
services: dhcpv6-client http https ssh

$ sudo apt-get install certbot python3-certbot-apache
$ sudo certbot --apache -d hoge-test.site
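The certbot package also installs a renewal timer; a dry run confirms that automatic renewal will work before the 90-day certificate actually expires:

$ sudo certbot renew --dry-run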

Oh, nice.

Instance segmentation in Python

$ sudo apt -y install python3-six python3-wheel python3-numpy python3-grpcio python3-protobuf python3-termcolor python3-typing-extensions python3-h5py python3-markdown python3-werkzeug python3-requests-oauthlib python3-rsa python3-cachetools python3-google-auth
$ sudo apt -y install python3-numpy python3-sklearn python3-matplotlib python3-seaborn
$ sudo pip3 install -U tensorflow tensorflow_datasets
$ pip3 install pixellib
$ curl -LO https://github.com/ayoolaolafenwa/PixelLib/releases/download/1.2/mask_rcnn_coco.h5

Download sample2.jpeg and put it in the data folder
https://pixellib.readthedocs.io/en/latest/Image_instance.html

import pixellib
from pixellib.instance import instance_segmentation

segment_image = instance_segmentation()
segment_image.load_model("data/mask_rcnn_coco.h5")
segment_image.segmentImage("data/sample2.jpeg", output_image_name = "image_new.jpg")
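A small variation from the PixelLib documentation linked above: passing show_bboxes=True overlays a labeled bounding box on each detected instance, which makes it easier to check what the model actually found.

# Same call as above, with class-labeled bounding boxes drawn as well.
segment_image.segmentImage("data/sample2.jpeg", show_bboxes=True,
                           output_image_name="image_bboxes.jpg")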

I understand how it works now, but it doesn't run quite right for me...
Maybe it's my environment...

[Speaker recognition] Improving accuracy by extracting multiple samples from a single audio file

Uses the short-time Fourier transform (STFT) and MFCCs.

import numpy as np
import librosa
import librosa.display
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import svm
from scipy import fftpack

# Map speaker names to class labels
speakers = {'kirishima' : 0, 'suzutsuki' : 1, 'belevskaya' : 2}

# Return the features for one file
def get_feat(file_name):
    a, sr = librosa.load(file_name)
    y = np.abs(librosa.stft(a))
    plt.figure(figsize=(10, 4))
    librosa.display.specshow(librosa.amplitude_to_db(y, ref=np.max), y_axis='log', x_axis='time', sr=sr)
    plt.colorbar(format='%+2.0fdB')
    plt.tight_layout()
    return y

# Return the features together with their (label, file name) pairs
def get_data(dir_name):
    data_X = []
    data_y = []
    for file_name in sorted(os.listdir(path=dir_name)):
        print("read: {}".format(file_name))
        speaker = file_name[0:file_name.index('_')]
        data_X.append(get_feat(os.path.join(dir_name, file_name)))
        data_y.append((speakers[speaker], file_name))
        
    return (np.array(data_X), np.array(data_y))

# data_X, data_y = get_data('voiceset')
get_feat('sample/hi.wav')
get_feat('sample/lo.wav')

speakers = {'kirishima' : 0, 'suzutsuki' : 1, 'belevskaya' : 2}

# Return the features for one file (plotting disabled this time)
def get_feat(file_name):
    a, sr = librosa.load(file_name)
    y = np.abs(librosa.stft(a))
#     plt.figure(figsize=(10, 4))
#     librosa.display.specshow(librosa.amplitude_to_db(y, ref=np.max), y_axis='log', x_axis='time', sr=sr)
#     plt.colorbar(format='%+2.0fdB')
#     plt.tight_layout()
    return y

# Return the features together with their (label, file name) pairs
def get_data(dir_name):
    data_X = []
    data_y = []
    for file_name in sorted(os.listdir(path=dir_name)):
        print("read: {}".format(file_name))
        speaker = file_name[0:file_name.index('_')]
        data_X.append(get_feat(os.path.join(dir_name, file_name)))
        data_y.append((speakers[speaker], file_name))
        
    return (data_X, data_y)

data_X, data_y = get_data('voiceset')

train_X, test_X, train_y, test_y = train_test_split(data_X, data_y, random_state=11813)
print("{} -> {}, {}".format(len(data_X), len(train_X), len(test_X)))


def predict(X):
    # Classify each frame, then take a majority vote across frames.
    result = clf.predict(X.T)
    return np.argmax(np.bincount(result))

ok_count = 0

for X, y in zip(test_X, test_y):
    actual = predict(X)
    expected = y[0]
    file_name = y[1]
    ok_count += 1 if actual == expected else 0
    result = 'o' if actual == expected else 'x'
    print("{} file: {}, actual: {}, expected: {}".format(result, file_name, actual, expected))

print("{}/{}".format(ok_count, len(test_X)))

MFCC

def get_feat(file_name):
    a, sr = librosa.load(file_name)
    y = librosa.feature.mfcc(y=a, sr=sr)
#     plt.figure(figsize=(10, 4))
#     librosa.display.specshow(librosa.amplitude_to_db(y, ref=np.max), y_axis='log', x_axis='time', sr=sr)
#     plt.colorbar(format='%+2.0fdB')
#     plt.tight_layout()
    return y
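By default librosa.feature.mfcc returns a (20, n_frames) matrix, i.e. 20 coefficients per frame, so the frame-voting predict() above works unchanged; each frame is just a far more compact feature than a 1025-bin STFT column. A quick shape check (reusing a file path from the earlier sections):

feat = get_feat('voiceset/kirishima_b01.wav')
print(feat.shape)  # (20, n_frames)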

o file: suzutsuki_b06.wav, actual: 1, expected: 1
o file: kirishima_04_su.wav, actual: 0, expected: 0
o file: kirishima_c01.wav, actual: 0, expected: 0
o file: belevskaya_b04.wav, actual: 2, expected: 2
o file: belevskaya_b14.wav, actual: 2, expected: 2
o file: kirishima_b04.wav, actual: 0, expected: 0
o file: suzutsuki_b08.wav, actual: 1, expected: 1
o file: belevskaya_b07.wav, actual: 2, expected: 2
o file: suzutsuki_b03.wav, actual: 1, expected: 1
o file: belevskaya_b10.wav, actual: 2, expected: 2
o file: kirishima_b01.wav, actual: 0, expected: 0
o file: belevskaya_07_su.wav, actual: 2, expected: 2
12/12

MFCC is way too good.

[Speaker recognition] Improving accuracy with the Fourier transform

Decompose the original waveform into its frequencies -> the per-frequency totals form the power spectrum.
Time is ignored; the signal is treated as a set of values indexed by frequency.
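As a toy illustration of this (my own sketch, using the same scipy fftpack API as the code below), a pure 50 Hz tone shows up as a single spectral peak at 50 Hz:

import numpy as np
from scipy import fftpack

sr = 1000                              # hypothetical sampling rate
t = np.arange(sr) / sr                 # one second of samples
a = np.sin(2 * np.pi * 50 * t)         # pure 50 Hz tone
spec = np.abs(fftpack.rfft(a, n=sr))
freqs = fftpack.rfftfreq(n=sr, d=1/sr)
print(freqs[np.argmax(spec)])          # -> 50.0, the tone's frequency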

import numpy as np
import librosa
import librosa.display
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import svm
from scipy import fftpack

# Map speaker names to class labels
speakers = {'kirishima' : 0, 'suzutsuki' : 1, 'belevskaya' : 2}

# Return the features for one file
def get_feat(file_name):
    a, sr = librosa.load(file_name)
    # n=sr fixes the FFT length to one second of samples, so every file
    # yields a feature vector of the same size regardless of duration.
    fft_wave = fftpack.rfft(a, n=sr)
    fft_freq = fftpack.rfftfreq(n=sr, d=1/sr)
    y = librosa.amplitude_to_db(fft_wave, ref=np.max)
    plt.plot(fft_freq, y)
    plt.show()
    return y

# Return the features together with their (label, file name) pairs
def get_data(dir_name):
    data_X = []
    data_y = []
    for file_name in sorted(os.listdir(path=dir_name)):
        print("read: {}".format(file_name))
        speaker = file_name[0:file_name.index('_')]
        data_X.append(get_feat(os.path.join(dir_name, file_name)))
        data_y.append((speakers[speaker], file_name))
        
    return (np.array(data_X), np.array(data_y))

get_feat('sample/hi.wav')
get_feat('sample/lo.wav')

The horizontal axis is frequency. (Plots: spectra of hi.wav and lo.wav.)

# Split into training and test data
data_X, data_y = get_data('voiceset')  # the original omits this call; it mirrors the earlier sections
train_X, test_X, train_y, test_y = train_test_split(data_X, data_y, random_state=813)
print("{} -> {}, {}".format(len(data_X), len(train_X), len(test_X)))

clf = svm.SVC(gamma=0.0000001, C=10)
clf.fit(train_X, train_y.T[0])

ok_count = 0

for X, y in zip(test_X, test_y):
    actual = clf.predict(np.array([X]))[0]
    expected = y[0]
    file_name = y[1]
    ok_count += 1 if actual == expected else 0
    result = 'o' if actual == expected else 'x'
    print("{} file: {}, actual: {}, expected: {}".format(result, file_name, actual, expected))

print("{}/{}".format(ok_count, len(test_X)))

o file: belevskaya_b11.wav, actual: 2, expected: 2
o file: kirishima_c01.wav, actual: 0, expected: 0
x file: kirishima_c09.wav, actual: 2, expected: 0
x file: kirishima_04_su.wav, actual: 2, expected: 0
o file: belevskaya_b14.wav, actual: 2, expected: 2
o file: kirishima_b07.wav, actual: 0, expected: 0
x file: suzutsuki_b06.wav, actual: 2, expected: 1
x file: kirishima_c02.wav, actual: 2, expected: 0
o file: kirishima_b03.wav, actual: 0, expected: 0
o file: suzutsuki_b08.wav, actual: 1, expected: 1
o file: suzutsuki_b02.wav, actual: 1, expected: 1
o file: kirishima_b05.wav, actual: 0, expected: 0
8/12

Accuracy has improved: 8/12, versus 5/12 with the raw waveform.

[Speaker recognition] Basics 1

import librosa
import librosa.display
import matplotlib.pyplot as plt

a, sr = librosa.load('voiceset/kirishima_b01.wav')
librosa.display.waveplot(a, sr)

print(a)
print(len(a))
print(sr)

[ 1.3803428e-06 -2.3314392e-06 7.8938438e-06 … 0.0000000e+00
0.0000000e+00 0.0000000e+00]
132300
22050  # number of samples per second in the waveform (the sampling rate)

### Comparing high and low sounds

a, sr = librosa.load('sample/hi.wav')
librosa.display.waveplot(a, sr)
plt.show()

a, sr = librosa.load('sample/lo.wav')
librosa.display.waveplot(a, sr)
plt.show()

hi oscillates more finely than lo.
More oscillations per unit time tend to mean a higher pitch.
Based on this characteristic, we feed the waveform to an SVM for speaker recognition.

import numpy as np
import librosa
import librosa.display
import os
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn import svm

dir_name = 'voiceset'
for file_name in sorted(os.listdir(path=dir_name)):
    print("read: {}".format(file_name))
    a, sr = librosa.load(os.path.join(dir_name, file_name))
    print(a.shape)
    librosa.display.waveplot(a, sr)
    plt.show()

Even the same speaker's voice produces completely different waveforms from clip to clip.

speakers = {'kirishima': 0, 'suzutsuki': 1, 'belevskaya': 2}


def get_data(dir_name):
    data_X = []
    data_y = []
    for file_name in sorted(os.listdir(path=dir_name)):
        print("read: {}".format(file_name))
        a, sr = librosa.load(os.path.join(dir_name, file_name))
        print(a.shape)
        speaker = file_name[0:file_name.index('_')]
        data_X.append(a)
        data_y.append((speakers[speaker], file_name))
    
    return (np.array(data_X), np.array(data_y))
    
data_X, data_y = get_data("voiceset")

To train the SVM, every feature vector must have the same number of elements.

speakers = {'kirishima': 0, 'suzutsuki': 1, 'belevskaya': 2}

def get_feat(file_name):
    a, sr = librosa.load(file_name)
    # Crude length equalization: keep only the first 5000 samples
    # (about 0.23 s at 22,050 Hz).
    return a[0:5000]

def get_data(dir_name):
    data_X = []
    data_y = []
    for file_name in sorted(os.listdir(path=dir_name)):
        print("read: {}".format(file_name))
        speaker = file_name[0:file_name.index('_')]
        data_X.append(get_feat(os.path.join(dir_name, file_name)))
        data_y.append((speakers[speaker], file_name))
    
    return (np.array(data_X), np.array(data_y))
    
data_X, data_y = get_data("voiceset")

print("====data_X====")
print(data_X.shape)
print(data_X)
print("====data_y====")
print(data_y.shape)
print(data_y)

Split into training and test data

train_X, test_X, train_y, test_y = train_test_split(data_X, data_y, random_state=11813)
print("{}->{}, {}".format(len(data_X), len(train_X),len(test_X)))

Train with SVM

clf = svm.SVC(gamma=0.0001, C=1)
clf.fit(train_X, train_y.T[0])

SVC(C=1, break_ties=False, cache_size=200, class_weight=None, coef0=0.0,
    decision_function_shape='ovr', degree=3, gamma=0.0001, kernel='rbf',
    max_iter=-1, probability=False, random_state=None, shrinking=True,
    tol=0.001, verbose=False)

Classifying the test data

clf.predict(np.array([test_X[0]]))

ok_count = 0

for X, y in zip(test_X, test_y):
    actual = clf.predict(np.array([X]))[0]
    expected = y[0]
    file_name = y[1]
    ok_count += 1 if actual == expected else 0
    result = 'o' if actual == expected else 'x'
    print("{} file: {}, actual:{}, expected: {}".format(result, file_name, actual, expected))
    
print("{}/{}".format(ok_count, len(test_X)))

x file: suzutsuki_b06.wav, actual:2, expected: 1
x file: kirishima_04_su.wav, actual:2, expected: 0
x file: kirishima_c01.wav, actual:2, expected: 0
o file: belevskaya_b04.wav, actual:2, expected: 2
o file: belevskaya_b14.wav, actual:2, expected: 2
x file: kirishima_b04.wav, actual:2, expected: 0
x file: suzutsuki_b08.wav, actual:2, expected: 1
o file: belevskaya_b07.wav, actual:2, expected: 2
x file: suzutsuki_b03.wav, actual:2, expected: 1
o file: belevskaya_b10.wav, actual:2, expected: 2
x file: kirishima_b01.wav, actual:2, expected: 0
o file: belevskaya_07_su.wav, actual:2, expected: 2
5/12

The prediction accuracy needs to be improved.

[Speech recognition] Plotting audio waveforms with librosa

First, prepare an mp3 audio file.

Install librosa on Ubuntu
$ pip3 install librosa
$ sudo apt-get install libsndfile1
$ sudo apt install ffmpeg

import librosa
import numpy as np
import matplotlib.pyplot as plt

file_name = "./test.mp3"
y, sr = librosa.load(str(file_name))
time = np.arange(0, len(y)) / sr

plt.plot(time, y)
plt.xlabel("Time(s)")
plt.ylabel("Sound Amplitude")

plt.savefig('image.jpg',dpi=100)
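The same waveform can also be drawn with librosa's own helper, as in the speaker-recognition sections above. Note that waveplot was removed in librosa 0.10 in favor of waveshow, so this assumes an older version:

import librosa.display

librosa.display.waveplot(y, sr=sr)
plt.savefig('image2.jpg', dpi=100)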

Whoooa, I see.

Unit testing in Python

import unittest

def add(a, b):
    return a + b

class TestAdd(unittest.TestCase):

    def test_add(self):
        value1 = 3
        value2 = 5
        expected = 8

        actual = add(value1, value2)
        self.assertEqual(expected, actual)

if __name__ == "__main__":
    unittest.main()

Calling unittest.main() discovers every class in the script that inherits from unittest.TestCase, and runs each of its methods whose name starts with test as a test case.
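The same discovery also works from the command line, without going through the __main__ block (the file name here is whatever you saved the script as):

$ python3 -m unittest -v test_add.py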

Makes sense.

Implementing RSA encryption in Python

RSA is a public-key cryptosystem.
ciphertext = plaintext^E mod N, where {E, N} is the public key
plaintext = ciphertext^D mod N, where {D, N} is the private key

E, D, and N are derived from two primes p and q:
N = p * q
L = lcm(p - 1, q - 1)          # lcm = least common multiple
1 < E < L, gcd(E, L) = 1       # gcd = greatest common divisor; E and L must be coprime
1 < D < L, (E * D) mod L = 1   # D is any integer whose product with E leaves remainder 1 mod L

These conditions make decryption undo encryption: L = lcm(p - 1, q - 1) is the Carmichael function λ(N), and E * D ≡ 1 (mod λ(N)) implies (m^E)^D ≡ m (mod N) for every message m.

from math import gcd

def lcm(p, q):
    return (p * q) // gcd(p, q)

def generate_keys(p, q):
    N = p * q
    L = lcm(p - 1, q - 1)

    # E: the smallest integer coprime to L
    for i in range(2, L):
        if gcd(i, L) == 1:
            E = i
            break

    # D: the modular inverse of E mod L
    for i in range(2, L):
        if (E * i) % L == 1:
            D = i
            break

    return (E, N), (D, N)

def encrypt(plain_text, public_key):
    E, N = public_key
    plain_integers = [ord(char) for char in plain_text]
    encrypted_integers = [pow(i, E, N) for i in plain_integers]
    encrypted_text = ''.join(chr(i) for i in encrypted_integers)

    return encrypted_text

def decrypt(encrypted_text, private_key):
    D, N = private_key
    encrypted_integers = [ord(char) for char in encrypted_text]
    decrypted_integers = [pow(i, D, N) for i in encrypted_integers]
    decrypted_text = ''.join(chr(i) for i in decrypted_integers)

    return decrypted_text

def sanitize(encrypted_text):
    # Some code points in the ciphertext may not be encodable; replace them
    # for display only (decrypt() still uses the raw ciphertext).
    return encrypted_text.encode('utf-8', 'replace').decode('utf-8')

if __name__ == '__main__':
    public_key, private_key = generate_keys(101, 3259)

    plain_text = 'おはようございます'
    encrypted_text = encrypt(plain_text, public_key)
    decrypted_text = decrypt(encrypted_text, private_key)

    print(f'''
        Public key: {public_key}
        Private key: {private_key}

        Plaintext: "{plain_text}"

        Ciphertext:
        "{sanitize(encrypted_text)}"

        Plaintext (after decryption):
        "{decrypted_text}"
        '''[1:-1])

I see, so that's why it's called cryptocurrency: currency built on cryptography.