# songs-lyrics-generator/markov_model.py

import random
import re
from nltk.tokenize import SyllableTokenizer, word_tokenize
import pandas as pd


def clean_data(name):
    """Read a lyrics CSV and return a flat list of cleaned, lowercase words."""
    document = pd.read_csv(name, usecols=["Lyrics"])
    rows = document["Lyrics"].values.tolist()
    dataset = []
    # Song-structure labels that should not end up in the training data.
    forbidden_words = ['chorus', 'refrain', 'coda', 'solo', 'intro', 'introduction',
                       'verse', 'pre-chorus', 'post-chorus', 'bridge', 'outro', 'ref']
    for lyric in rows:
        if isinstance(lyric, str):
            lyric = lyric.lower()
            # Remove bracketed annotations such as "(chorus)" or "[verse 1]",
            # speaker labels like "singer:", slash-joined credits and repeat
            # markers like "x2" before stripping the remaining punctuation.
            lyric = re.sub(r"\([A-Za-z0-9:\s\.\?\,\&\*]+\)", "", lyric)
            lyric = re.sub(r"\[[A-Za-z0-9:\s\.\?\,\&\*]+\]", "", lyric)
            lyric = re.sub(r"[A-Za-z0-9]+::", "", lyric)
            lyric = re.sub(r"[A-Za-z0-9]+:", "", lyric)
            lyric = re.sub(r"/[A-Za-z0-9]+", "", lyric)
            lyric = re.sub(r"x[0-9]", "", lyric)
            lyric = re.sub(r"[,.\"\'!@#$%^&*(){}?/;`~:<>+=\-\\]", "", lyric)
            tokens = word_tokenize(lyric)
            words = [word for word in tokens if word.isalpha()]
            words = [word for word in words if word not in forbidden_words]
            dataset += words
    # The path split assumes Windows-style separators in `name`.
    print(name.split('\\')[-1], "number of words in cleaned data: ", len(dataset))
    return dataset
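
# clean_data expects a CSV with a "Lyrics" column; the file name below is only
# an illustration, not a path from this project:
#   clean_data("songs.csv")  ->  ["i", "love", "you", ...]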


def create_markov_model(dataset, n_gram):
    """Build an n-gram Markov chain mapping each n-word state to the
    probability distribution of the single word that follows it."""
    markov_model = {}
    for i in range(len(dataset) - 1 - 2 * n_gram):
        # The state is the n_gram consecutive words starting at i; the next
        # state is the one word that follows them.
        current_state, next_state = "", ""
        for j in range(n_gram):
            current_state += dataset[i + j] + " "
        next_state = dataset[i + n_gram]
        current_state = current_state[:-1]
        if current_state not in markov_model:
            markov_model[current_state] = {}
            markov_model[current_state][next_state] = 1
        else:
            if next_state in markov_model[current_state]:
                markov_model[current_state][next_state] += 1
            else:
                markov_model[current_state][next_state] = 1
    # Turn raw transition counts into probabilities.
    for current_state, transition in markov_model.items():
        total = sum(transition.values())
        for state, count in transition.items():
            markov_model[current_state][state] = count / total
    return markov_model
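
# Illustrative shape of the returned model for n_gram = 2 (made-up words and
# probabilities, not taken from any real corpus):
#   {"i love": {"you": 0.75, "it": 0.25}, "love you": {"so": 1.0}, ...}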


def default_next_state(markov_model, current_state, lyrics):
    """Sample the next word from the current state's transition
    probabilities and slide the state window forward by one word."""
    next_state = random.choices(list(markov_model[current_state].keys()),
                                list(markov_model[current_state].values()))
    lyrics += next_state[0] + " "
    # Rebuild the state from the last n_gram words of the lyrics; the string
    # ends with a space, so the split ends with an empty element.
    n_gram = len(current_state.split(" "))
    current_state = ""
    for i in range(n_gram + 1, 1, -1):
        current_state += lyrics.split(" ")[-i] + " "
    current_state = current_state[:-1]
    return current_state, lyrics
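
# Worked example with assumed values: if current_state is "love you" and the
# sampled word is "so", lyrics gains "so " and the returned state is "you so".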


def rhyming_next_state(rime_states, current_state, lyrics):
    """Like default_next_state, but sample only from the pre-filtered
    rhyming candidates in rime_states."""
    next_state = random.choices(list(rime_states.keys()),
                                list(rime_states.values()))
    lyrics += next_state[0] + " "
    n_gram = len(current_state.split(" "))
    current_state = ""
    for i in range(n_gram + 1, 1, -1):
        current_state += lyrics.split(" ")[-i] + " "
    current_state = current_state[:-1]
    return current_state, lyrics
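
# rime_states is the candidate dictionary pre-filtered in generate_lyrics,
# e.g. (hypothetical values) {"night": 0.4, "light": 0.6}.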


def generate_lyrics(markov_model, start, limit, try_rhyme, rime):
    """Generate `limit` further words after the seed state `start`. When
    try_rhyme is True, the final word is chosen, where possible, to rhyme
    with the last word of `rime`."""
    n = 0
    current_state = start
    lyrics = ""
    lyrics += current_state + " "
    lyrics = lyrics[0].upper() + lyrics[1:]
    while n < limit:
        if n == limit - 1 and try_rhyme is True:
            # Keep only candidates whose final syllable matches the final
            # syllable of the rhyme target.
            rime = rime.split(" ")[-1]
            tk = SyllableTokenizer()
            rime_syllab = tk.tokenize(rime)[-1]
            rime_states = {}
            for state, probability in markov_model[current_state].items():
                syllab = tk.tokenize(state)[-1]
                if rime_syllab == syllab and rime != state:
                    rime_states.update({state: probability})
            if rime_states:
                current_state, lyrics = rhyming_next_state(rime_states, current_state, lyrics)
            else:
                # No rhyming candidate found; fall back to an ordinary step.
                current_state, lyrics = default_next_state(markov_model, current_state, lyrics)
        else:
            current_state, lyrics = default_next_state(markov_model, current_state, lyrics)
        n += 1
    return lyrics, current_state
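

# Minimal usage sketch. The CSV path, n-gram size and generation parameters
# below are illustrative assumptions, not values defined by this module, and a
# very small corpus may raise KeyError for states missing from the model.
if __name__ == "__main__":
    words = clean_data("lyrics.csv")                 # hypothetical input file
    model = create_markov_model(words, 2)            # bigram states
    seed = random.choice(list(model.keys()))         # start from an existing state
    line, last_state = generate_lyrics(model, seed, limit=8, try_rhyme=False, rime="")
    print(line)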