import http.cookiejar
import itertools
import os
import pickle
import random
import re
import subprocess
import time
from concurrent import futures

import mechanize
import urllib3
from bs4 import BeautifulSoup

def select_form(form):
    """Return True when *form* is the site's login form.

    Used as the ``predicate`` for mechanize's ``select_form``; the login
    form is identified by its HTML class attribute ``user_form``.
    """
    css_class = form.attrs.get('class', None)
    return css_class == 'user_form'

def initial_br(cj):
    """Build a mechanize Browser bound to the shared cookie jar.

    robots.txt handling is disabled and a desktop Firefox user agent is
    sent so the site serves its normal pages.
    """
    browser = mechanize.Browser()
    browser.set_cookiejar(cj)
    browser.set_handle_robots(False)
    user_agent = ('User-agent',
                  'Mozilla/5.0 (X11; Linux i686; rv:64.0) Gecko/20100101 Firefox/64.0')
    browser.addheaders = [user_agent]
    return browser

def login (browser, cookiejar):
    """Log in to the site and persist the session cookies.

    Selects the form whose class is ``user_form`` (see ``select_form``),
    fills in the credentials, submits, and saves the cookie jar to disk so
    later runs (and wget, which reads cookies.txt) can reuse the session.
    """
    # Handle login form
    print("Detected Login page")
    browser.select_form(predicate=select_form)
    print (browser.form)
    # NOTE(review): credentials are hard-coded in source — consider loading
    # them from environment variables or a file excluded from version control.
    browser.form['username'] = 'jgaztelu'
    browser.form['password'] = 'Hzbk1429!'
    browser.submit()
    # Keep session/discardable cookies too, so the saved jar stays usable.
    cookiejar.save (ignore_discard=True, ignore_expires=True)

def extract_links (pagenr, cj):
    """Scrape one /videos listing page and return its 1080p video links.

    Parameters
    ----------
    pagenr : int
        1-based page number of the listing to fetch.
    cj : cookie jar used to authenticate the request.

    Returns
    -------
    list[str]
        The href of every anchor whose href contains "1080", one entry per
        matching tag (first quoted string in the tag's HTML).
    """
    ret_list = []
    quotation = re.compile(r'"([^"]*)"') #Regex to get text between ""
    br = initial_br(cj)
    # BUG FIX: the original referenced the global `page` instead of the
    # `pagenr` parameter — it only worked because the caller resolved each
    # future immediately; any truly concurrent caller would scrape the
    # wrong pages.
    print ("Scraping page nr. " + str(pagenr))
    br.open("https://en.ersties.com/videos?p=" + str(pagenr))
    soup = BeautifulSoup(br.response().read(),features="html5lib")
    result_set = soup.find_all(href=re.compile("1080"))
    for tag in result_set:
        matches = quotation.findall(str(tag))
        ret_list.append(matches[0])
    time.sleep(0.1)  # small delay to be gentle with the server
    return ret_list
def download_video (file, dest_folder, cj, base_url='https://en.ersties.com'):
    """Download one video via wget, skipping files that already exist.

    Parameters
    ----------
    file : str
        Site-relative URL path (e.g. "/path/clip.mp4?token=x").
    dest_folder : str
        Destination directory, expected to end with a slash.
    cj : unused here; kept for interface compatibility with the thread-pool
        caller (wget reads cookies.txt from disk directly).
    base_url : str
        Scheme+host prepended to *file* to build the full download URL.

    Returns
    -------
    int
        1 when a download was attempted, 0 when the file already existed —
        the caller counts these as downloaded/skipped respectively.
    """
    # Strip the path prefix and any query string to get a bare filename.
    # BUG FIX: the original sliced with file.find("?"), which returns -1
    # when there is no query string and silently chopped the last character
    # off the filename.
    filename = file[file.rfind("/") + 1:].partition("?")[0]
    url = base_url + file
    dest_file = dest_folder + filename
    print ("Download URL: " + url)
    print ("Dest: " + dest_file)
    if not os.path.exists(dest_file):
        # BUG FIX: write to dest_file — the original passed the bare
        # filename to wget -O, saving into the CWD, so the existence check
        # above never matched previous downloads.
        # subprocess.run with an argument list avoids shell injection via
        # the (site-supplied) URL, unlike the original os.system string.
        subprocess.run(['wget', '-O', dest_file,
                        '--load-cookies=cookies.txt', url])
        rand_num = random.randint(10, 45)
        print("Waiting random time for next video:" + str(rand_num) + "s")
        time.sleep(rand_num)
        return 1
    # BUG FIX: return 0 on skip (the original always returned 1, so the
    # caller's `skipped` counter could never increment); also fixed the
    # "existe" typo in the message.
    print("File already exists, skipping...")
    return 0

##################################################################################
# Script entry: reuse (or establish) a logged-in session, collect the 1080p
# links from every listing page, then download them with a thread pool.
web_header = 'https://en.ersties.com'
dest_folder = "/data/Ersties/"
link_file = "links.txt"
video_list = []
downloaded = 0
skipped = 0

# Load saved session cookies; cookies.txt is also read by wget later on.
cj = http.cookiejar.MozillaCookieJar(filename="cookies.txt")
cj.load(ignore_discard=True, ignore_expires=True)

br = initial_br(cj)
br.open("https://en.ersties.com/videos")
current_url = br.geturl()
if current_url.find("login") != -1:
    login(br, cj)

# Collect links page by page. BUG FIX: the original appended only
# result()[0] — the FIRST link of each page — discarding the rest; extend
# keeps every link. The immediate .result() after submit (which serializes
# the pool) is kept deliberately: extract_links as written reads the loop
# variable `page` as a global, so concurrent execution is not safe.
with futures.ThreadPoolExecutor(max_workers=2) as executor:
    for page in range(1, 307):
        video_links = executor.submit(extract_links, page, cj)
        video_list.extend(video_links.result())

print ("Video list ready. Total links: " + str(len(video_list)))
print (video_list)

# Download concurrently; download_video returns 1 for a download attempt
# and is expected to return something else for a skip.
with futures.ThreadPoolExecutor(max_workers=4) as downloader:
    for download_result in downloader.map(download_video, video_list,
                                          itertools.repeat(dest_folder),
                                          itertools.repeat(cj)):
        if download_result == 1:
            downloaded += 1
            print("Downloaded: " + str(downloaded))
        else:
            skipped += 1

print ("Downloaded: " + str(downloaded) + " Skipped: " + str(skipped))

# Iterate video pages
# for page in range(1,307):
#     # Open page and extract highest quality links
#     br.open("https://en.ersties.com/videos?p=" + str(page))
#     soup = BeautifulSoup(br.response().read(),features="html5lib")
#     result_set = soup.find_all(href=re.compile("1080"))
#     for i in result_set:
#         si = quotation.findall(str(i))
#         print(si[0])
#         video_list.append(si[0])
#
#     # Generate download URL and retrieve file
#     for file in video_list:
#         filename = file[file.rfind("/")+1:file.find("?")] # Extract filename from URL
#         url = web_header + file
#         dest_file = dest_folder + filename
#         print ("Download URL: " + url)
#         print ("Dest: " + dest_file)
#         if (os.path.exists (dest_file) == 0): # Check that file doesn't already exist
#             br.retrieve(url,dest_file)
#             downloaded+=1
#             rand_num = random.randint(10,45)
#             print("Waiting random time for next video:" + str(rand_num) + "s")
#             time.sleep(rand_num)
#         else:
#             print("File already existe, skipping...")
#
#         print("Total downloaded: " + str(downloaded))
